Made more efficient by adding the errors together, so the Jacobians are back to 1*3, always. This is a big savings if a landmark is seen from many poses.

release/4.3a0
Frank Dellaert 2013-06-24 15:57:03 +00:00
parent 75751cc5fa
commit 43fe036c32
2 changed files with 38 additions and 27 deletions
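
For context: before this change unwhitenedError returned an n-dimensional vector, one entry per range measurement, and each pose's Jacobian block was n*3; the errors are now summed into a single scalar, so every block is 1*3 no matter how many poses range the landmark. Below is a minimal usage sketch (mine, not part of the commit), using only the API visible in the diffs that follow; the sigma, keys, poses and ranges are illustration values, chosen so that all three poses are at range 5 from a landmark at (3,4).

#include <gtsam_unstable/slam/SmartRangeFactor.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/geometry/Pose2.h>
#include <vector>

using namespace gtsam;

int main() {
  SmartRangeFactor f(2.0);           // sigma = 2, stored internally as variance = 4
  f.addRange(1, 5.0);                // after each addRange the noise model becomes
  f.addRange(2, 5.0);                // Isotropic::Variance(1, n * variance)
  f.addRange(3, 5.0);

  Values values;
  values.insert(1, Pose2(0, 0, 0));  // all three poses are at range 5 from (3,4)
  values.insert(2, Pose2(6, 0, 0));
  values.insert(3, Pose2(0, 8, 0));

  // The unwhitened error is the *sum* of the three range residuals (size 1,
  // here ~0), and each Jacobian block H[j] is now 1x3 instead of 3x3.
  std::vector<Matrix> H(3);
  Vector e = f.unwhitenedError(values, H);
  return 0;
}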

gtsam_unstable/slam/SmartRangeFactor.h

@@ -36,7 +36,7 @@ protected:
typedef SmartRangeFactor This;
std::vector<double> measurements_; ///< Range measurements
double sigma_; ///< standard deviation on noise
double variance_; ///< variance on noise
public:
@@ -44,8 +44,12 @@ public:
SmartRangeFactor() {
}
/** standard binary constructor */
SmartRangeFactor(double sigma) : NoiseModelFactor(noiseModel::Isotropic::Sigma(1,sigma_)), sigma_(sigma) {
/**
* Constructor
* @param s standard deviation of range measurement noise
*/
SmartRangeFactor(double s) :
NoiseModelFactor(noiseModel::Isotropic::Sigma(1, s)), variance_(s * s) {
}
virtual ~SmartRangeFactor() {
@@ -55,7 +59,9 @@ public:
void addRange(Key key, double measuredRange) {
keys_.push_back(key);
measurements_.push_back(measuredRange);
noiseModel_ = noiseModel::Isotropic::Sigma(keys_.size(),sigma_);
size_t n = keys_.size();
// Since we add the errors, the noise variance adds
noiseModel_ = noiseModel::Isotropic::Variance(1, n * variance_);
}
// Testable
@@ -104,8 +110,9 @@ public:
virtual Vector unwhitenedError(const Values& x,
boost::optional<std::vector<Matrix>&> H = boost::none) const {
size_t n = size();
if (H) assert(H->size()==n);
Vector errors = zero(n);
if (H)
assert(H->size()==n);
Vector errors = zero(1);
if (n >= 3) {
// create n circles corresponding to measured range around each pose
std::list<Circle2> circles;
@@ -114,19 +121,16 @@ public:
circles.push_back(Circle2(pose.translation(), measurements_[j]));
}
// triangulate to get the optimized point
// TODO: Should we have a (better?) variant that does this in relative coordinates ?
Point2 optimizedPoint = triangulate(circles);
// now evaluate the errors between predicted and measured range
for (size_t j = 0; j < n; j++) {
const Pose2& pose = x.at<Pose2>(keys_[j]);
if (H) {
// calculate n*3 derivative for each of the n poses
(*H)[j] = zeros(n,3);
Matrix Hj;
errors[j] = pose.range(optimizedPoint, Hj) - measurements_[j];
(*H)[j].row(j) = Hj;
}
if (H)
// also calculate 1*3 derivative for each of the n poses
errors[0] += pose.range(optimizedPoint, (*H)[j]) - measurements_[j];
else
errors[j] = pose.range(optimizedPoint) - measurements_[j];
errors[0] += pose.range(optimizedPoint) - measurements_[j];
}
}
return errors;
@@ -135,7 +139,8 @@ public:
/// @return a deep copy of this factor
virtual gtsam::NonlinearFactor::shared_ptr clone() const {
return boost::static_pointer_cast<gtsam::NonlinearFactor>(
gtsam::NonlinearFactor::shared_ptr(new This(*this))); }
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
}
};
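
The comment added to addRange ("Since we add the errors, the noise variance adds") and the switch from Isotropic::Sigma(n, sigma) to Isotropic::Variance(1, n * variance_) follow from the usual rule for a sum of independent measurements. In notation not spelled out in the code, with \hat p the point triangulated from the n range circles and r_j the measured ranges:

\[
e(x_1,\dots,x_n) \;=\; \sum_{j=1}^{n}\bigl(\mathrm{range}(x_j,\hat p) - r_j\bigr),
\qquad
\operatorname{Var}(e) \;=\; \sum_{j=1}^{n}\sigma^2 \;=\; n\,\sigma^2,
\qquad
\frac{\partial e}{\partial x_j} \;=\; \frac{\partial\,\mathrm{range}(x_j,\hat p)}{\partial x_j} \;\in\; \mathbb{R}^{1\times 3}.
\]

Note that, as in the code, the dependence of the triangulated point \hat p on the poses is not differentiated: each H[j] holds only the range derivative with \hat p held fixed.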

testSmartRangeFactor.cpp

@@ -19,6 +19,7 @@
#include <gtsam_unstable/slam/SmartRangeFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/PriorFactor.h>
#include <CppUnitLite/TestHarness.h>
using namespace std;
@@ -68,23 +69,23 @@ TEST( SmartRangeFactor, allAtOnce ) {
EXPECT(assert_equal(Vector_(1,0.0), actual1));
f.addRange(2, r2);
Vector actual2 = f.unwhitenedError(values);
EXPECT(assert_equal(Vector2(0,0), actual2));
EXPECT(assert_equal(Vector_(1,0.0), actual2));
f.addRange(3, r3);
vector<Matrix> H(3);
Vector actual3 = f.unwhitenedError(values);
EXPECT_LONGS_EQUAL(3,f.keys().size());
EXPECT(assert_equal(Vector3(0,0,0), actual3));
EXPECT_LONGS_EQUAL(3, f.keys().size());
EXPECT(assert_equal(Vector_(1,0.0), actual3));
// Check keys and Jacobian
Vector actual4 = f.unwhitenedError(values,H); // with H now !
EXPECT(assert_equal(Vector3(0,0,0), actual4));
CHECK(assert_equal(Matrix_(3,3, 0.0,-1.0,0.0, 0.0,0.0,0.0, 0.0,0.0,0.0), H.front()));
CHECK(assert_equal(Matrix_(3,3, 0.0,0.0,0.0, 0.0,0.0,0.0, sqrt(2)/2,-sqrt(2)/2,0.0), H.back()));
Vector actual4 = f.unwhitenedError(values, H); // with H now !
EXPECT(assert_equal(Vector_(1,0.0), actual4));
CHECK(assert_equal(Matrix_(1,3, 0.0,-1.0,0.0), H.front()));
CHECK(assert_equal(Matrix_(1,3, sqrt(2)/2,-sqrt(2)/2,0.0), H.back()));
// Test clone
NonlinearFactor::shared_ptr clone = f.clone();
EXPECT_LONGS_EQUAL(3,clone->keys().size());
EXPECT_LONGS_EQUAL(3, clone->keys().size());
// Create initial value for optimization
Values initial;
@@ -92,18 +93,23 @@ TEST( SmartRangeFactor, allAtOnce ) {
initial.insert(2, Pose2(5, 0, 0));
initial.insert(3, Pose2(5, 6, 0));
Vector actual5 = f.unwhitenedError(initial);
EXPECT(assert_equal(Vector3(0,0,sqrt(25+16)-sqrt(50)), actual5));
EXPECT(assert_equal(Vector_(1,sqrt(25+16)-sqrt(50)), actual5));
// Try optimizing
NonlinearFactorGraph graph;
graph.add(f);
const noiseModel::Base::shared_ptr //
priorNoise = noiseModel::Diagonal::Sigmas(Vector3(1, 1, M_PI));
graph.add(PriorFactor<Pose2>(1, pose1, priorNoise));
graph.add(PriorFactor<Pose2>(2, pose2, priorNoise));
LevenbergMarquardtParams params;
//params.setVerbosity("ERROR");
Values result = LevenbergMarquardtOptimizer(graph, initial, params).optimize();
// params.setVerbosity("ERROR");
Values result =
LevenbergMarquardtOptimizer(graph, initial, params).optimize();
EXPECT(assert_equal(values.at<Pose2>(1), result.at<Pose2>(1)));
EXPECT(assert_equal(values.at<Pose2>(2), result.at<Pose2>(2)));
// only the third pose will be changed, converges on following:
EXPECT(assert_equal(Pose2(5.52157630366, 5.58273895707, 0), result.at<Pose2>(3)));
EXPECT(assert_equal(Pose2(5.52159, 5.582727, 0), result.at<Pose2>(3),1e-5));
}
/* ************************************************************************* */
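
One more note on the test changes: a single summed range error constrains the three Pose2 unknowns (9 degrees of freedom) by only one equation, which is presumably why this commit also adds PriorFactor priors on poses 1 and 2; the test then checks that poses 1 and 2 are unchanged while pose 3 converges onto its range circle. A condensed stand-alone version of that optimization setup is sketched below; the test's r1/r2/r3, pose1/pose2 and sigma are defined outside the hunks shown, so the numbers here are stand-ins (same geometry as the sketch above).

#include <gtsam_unstable/slam/SmartRangeFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <cmath>

using namespace gtsam;

int main() {
  // Ground-truth poses 1 and 2, both at range 5 from a landmark at (3,4);
  // the true pose 3 would be Pose2(0, 8, 0).
  Pose2 pose1(0, 0, 0), pose2(6, 0, 0);

  SmartRangeFactor f(1.0);
  f.addRange(1, 5.0);
  f.addRange(2, 5.0);
  f.addRange(3, 5.0);

  NonlinearFactorGraph graph;
  graph.add(f);

  // Pin down the gauge with priors on the first two poses, as in the test.
  noiseModel::Base::shared_ptr priorNoise =
      noiseModel::Diagonal::Sigmas(Vector3(1, 1, M_PI));
  graph.add(PriorFactor<Pose2>(1, pose1, priorNoise));
  graph.add(PriorFactor<Pose2>(2, pose2, priorNoise));

  // Start pose 3 off its range circle and let Levenberg-Marquardt pull it back.
  Values initial;
  initial.insert(1, pose1);
  initial.insert(2, pose2);
  initial.insert(3, Pose2(1, 9, 0));

  LevenbergMarquardtParams params;
  Values result = LevenbergMarquardtOptimizer(graph, initial, params).optimize();
  return 0;
}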