Fixed Jacobians, optimization works

release/4.3a0
Frank Dellaert 2013-06-24 15:31:13 +00:00
parent b7dbcefa8b
commit aea4f31096
2 changed files with 68 additions and 29 deletions
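In outline: SmartRangeFactor now takes a scalar sigma instead of a shared noise model, rebuilds its isotropic noise model as ranges are added, and unwhitenedError fills one n*3 Jacobian block per pose, which is what lets the Levenberg-Marquardt run in the new test converge. A minimal usage sketch against the post-commit API (not part of the commit; the keys, poses, and ranges mirror the test below):

#include <gtsam_unstable/slam/SmartRangeFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <cmath>

using namespace gtsam;

int main() {
  // One smart factor over three poses, sigma = 2.0 (the new constructor).
  SmartRangeFactor f(2.0);
  f.addRange(1, 10.0);            // range from pose 1 to the unknown point
  f.addRange(2, std::sqrt(50.0)); // range from pose 2
  f.addRange(3, std::sqrt(50.0)); // range from pose 3

  NonlinearFactorGraph graph;
  graph.add(f);

  Values initial;
  initial.insert(1, Pose2(0, 0, 0));
  initial.insert(2, Pose2(5, 0, 0));
  initial.insert(3, Pose2(5, 6, 0)); // perturbed third pose, as in the test

  Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
  result.print("result");
  return 0;
}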

gtsam_unstable/slam/SmartRangeFactor.h

@@ -33,8 +33,10 @@ protected:
double radius;
};
- /// Range measurements
- std::vector<double> measurements_;
+ typedef SmartRangeFactor This;
+ std::vector<double> measurements_; ///< Range measurements
+ double sigma_; ///< standard deviation on noise
public:
@@ -43,7 +45,7 @@ public:
}
/** standard binary constructor */
- SmartRangeFactor(const SharedNoiseModel& model) {
+ SmartRangeFactor(double sigma) : NoiseModelFactor(noiseModel::Isotropic::Sigma(1,sigma)), sigma_(sigma) {
}
virtual ~SmartRangeFactor() {
@@ -53,6 +55,7 @@ public:
void addRange(Key key, double measuredRange) {
keys_.push_back(key);
measurements_.push_back(measuredRange);
+ noiseModel_ = noiseModel::Isotropic::Sigma(keys_.size(),sigma_);
}
// Testable
@@ -100,30 +103,40 @@
*/
virtual Vector unwhitenedError(const Values& x,
boost::optional<std::vector<Matrix>&> H = boost::none) const {
- size_t K = size();
- Vector errors = zero(K);
- if (K >= 3) {
+ size_t n = size();
+ if (H) assert(H->size()==n);
+ Vector errors = zero(n);
+ if (n >= 3) {
// create n circles corresponding to measured range around each pose
std::list<Circle2> circles;
- for (size_t i = 0; i < K; i++) {
- const Pose2& pose = x.at<Pose2>(keys_[i]);
- circles.push_back(Circle2(pose.translation(), measurements_[i]));
+ for (size_t j = 0; j < n; j++) {
+ const Pose2& pose = x.at<Pose2>(keys_[j]);
+ circles.push_back(Circle2(pose.translation(), measurements_[j]));
}
// triangulate to get the optimized point
Point2 optimizedPoint = triangulate(circles);
- if (H)
- *H = std::vector<Matrix>();
- for (size_t i = 0; i < K; i++) {
- const Pose2& pose = x.at<Pose2>(keys_[i]);
+ // now evaluate the errors between predicted and measured range
+ for (size_t j = 0; j < n; j++) {
+ const Pose2& pose = x.at<Pose2>(keys_[j]);
if (H) {
- Matrix Hi;
- errors[i] = pose.range(optimizedPoint, Hi) - measurements_[i];
- H->push_back(Hi);
- } else
- errors[i] = pose.range(optimizedPoint) - measurements_[i];
+ // calculate n*3 derivative for each of the n poses
+ (*H)[j] = zeros(n,3);
+ Matrix Hj;
+ errors[j] = pose.range(optimizedPoint, Hj) - measurements_[j];
+ (*H)[j].row(j) = Hj;
+ }
+ else
+ errors[j] = pose.range(optimizedPoint) - measurements_[j];
}
}
return errors;
}
+ /// @return a deep copy of this factor
+ virtual gtsam::NonlinearFactor::shared_ptr clone() const {
+ return boost::static_pointer_cast<gtsam::NonlinearFactor>(
+ gtsam::NonlinearFactor::shared_ptr(new This(*this))); }
};
} // \namespace gtsam
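On the Jacobian layout above: each (*H)[j] is an n*3 block whose j-th row holds the 1*3 derivative of the j-th range error with respect to pose j, with all other rows zero. As a sanity check (a standalone sketch, not part of the commit, assuming the 2013-era Pose2::range overload used above), the first row asserted in the test below can be reproduced directly:

#include <gtsam/geometry/Pose2.h>
#include <iostream>

int main() {
  using namespace gtsam;
  Pose2 pose1(0, 0, 0); // first pose of the test fixture
  Point2 p(0, 10);      // the point the three range circles triangulate to
  Matrix H1;            // receives the 1x3 range derivative
  double r = pose1.range(p, H1);
  // Expect r == 10 and H1 == [0 -1 0]: moving pose1 along its y-axis
  // toward the point shortens the range at unit rate.
  std::cout << "range: " << r << "\nH1: " << H1 << std::endl;
  return 0;
}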

gtsam_unstable/slam/tests/testSmartRangeFactor.cpp

@@ -17,27 +17,28 @@
*/
#include <gtsam_unstable/slam/SmartRangeFactor.h>
+ #include <gtsam/nonlinear/NonlinearFactorGraph.h>
+ #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <CppUnitLite/TestHarness.h>
using namespace std;
using namespace gtsam;
- const noiseModel::Base::shared_ptr gaussian = noiseModel::Isotropic::Sigma(1,
-     2.0);
+ static const double sigma = 2.0;
/* ************************************************************************* */
TEST( SmartRangeFactor, constructor ) {
SmartRangeFactor f1;
LONGS_EQUAL(0, f1.size())
- SmartRangeFactor f2(gaussian);
+ SmartRangeFactor f2(sigma);
LONGS_EQUAL(0, f2.size())
}
/* ************************************************************************* */
TEST( SmartRangeFactor, addRange ) {
- SmartRangeFactor f(gaussian);
+ SmartRangeFactor f(sigma);
f.addRange(1, 10);
f.addRange(1, 12);
LONGS_EQUAL(2, f.size())
@@ -45,7 +46,7 @@ TEST( SmartRangeFactor, addRange ) {
/* ************************************************************************* */
- TEST( SmartRangeFactor, unwhitenedError ) {
+ TEST( SmartRangeFactor, allAtOnce ) {
// Test situation:
Point2 p(0, 10);
Pose2 pose1(0, 0, 0), pose2(5, 0, 0), pose3(5, 5, 0);
@@ -59,7 +60,7 @@ TEST( SmartRangeFactor, unwhitenedError ) {
values.insert(2, pose2);
values.insert(3, pose3);
- SmartRangeFactor f(gaussian);
+ SmartRangeFactor f(sigma);
f.addRange(1, r1);
// Whenever there are two ranges or less, error should be zero
@@ -70,14 +71,39 @@ TEST( SmartRangeFactor, unwhitenedError ) {
EXPECT(assert_equal(Vector2(0,0), actual2));
f.addRange(3, r3);
- vector<Matrix> H;
- Vector actual3 = f.unwhitenedError(values,H);
+ vector<Matrix> H(3);
+ Vector actual3 = f.unwhitenedError(values);
+ EXPECT_LONGS_EQUAL(3,f.keys().size());
EXPECT(assert_equal(Vector3(0,0,0), actual3));
- // Check keys and Jacobian
- EXPECT_LONGS_EQUAL(3,f.keys().size());
- EXPECT(assert_equal(Matrix_(1,3,0.0,-1.0,0.0), H.front()));
- EXPECT(assert_equal(Matrix_(1,3,sqrt(2)/2,-sqrt(2)/2,0.0), H.back()));
+ Vector actual4 = f.unwhitenedError(values,H); // with H now!
+ EXPECT(assert_equal(Vector3(0,0,0), actual4));
+ CHECK(assert_equal(Matrix_(3,3, 0.0,-1.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0), H.front()));
+ CHECK(assert_equal(Matrix_(3,3, 0.0,0.0,0.0, 0.0,0.0,0.0, sqrt(2)/2,-sqrt(2)/2,0.0), H.back()));
+ // Test clone
+ NonlinearFactor::shared_ptr clone = f.clone();
+ EXPECT_LONGS_EQUAL(3,clone->keys().size());
+ // Create initial value for optimization
+ Values initial;
+ initial.insert(1, Pose2(0, 0, 0));
+ initial.insert(2, Pose2(5, 0, 0));
+ initial.insert(3, Pose2(5, 6, 0));
+ Vector actual5 = f.unwhitenedError(initial);
+ EXPECT(assert_equal(Vector3(0,0,sqrt(25+16)-sqrt(50)), actual5));
+ // Try optimizing
+ NonlinearFactorGraph graph;
+ graph.add(f);
+ LevenbergMarquardtParams params;
+ //params.setVerbosity("ERROR");
+ Values result = LevenbergMarquardtOptimizer(graph, initial, params).optimize();
+ EXPECT(assert_equal(values.at<Pose2>(1), result.at<Pose2>(1)));
+ EXPECT(assert_equal(values.at<Pose2>(2), result.at<Pose2>(2)));
+ // only the third pose will be changed; converges on the following:
+ EXPECT(assert_equal(Pose2(5.52157630366, 5.58273895707, 0), result.at<Pose2>(3)));
}
/* ************************************************************************* */
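The actual5 expectation can be verified by hand: poses 1 and 2 sit at their true values, so the circles still triangulate to the point (0, 10), and only the third residual is nonzero: the range predicted from the perturbed pose3 at (5, 6) minus the measured r3 (taken at the true pose (5, 5)). A hypothetical back-of-the-envelope check, not part of the commit:

#include <cmath>
#include <cstdio>

int main() {
  // Predicted range: perturbed pose3 at (5,6) to the triangulated point (0,10).
  double predicted = std::sqrt(25.0 + 16.0); // sqrt(41), about 6.403
  // Measured r3: true pose3 at (5,5) to the point (0,10).
  double measured = std::sqrt(50.0);         // about 7.071
  std::printf("third residual: %f\n", predicted - measured); // about -0.668
  return 0;
}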