Fixed bug with shared solvers in recursive LM nonlinear optimizer

release/4.3a0
Alex Cunningham 2011-02-10 16:01:28 +00:00
parent 7404f78bc1
commit 06b08c6f85
2 changed files with 53 additions and 46 deletions


@@ -292,6 +292,7 @@ namespace gtsam {
     double next_error = error_;
     shared_values next_values = values_;
+    shared_solver solver = solver_;
     while(true) {
       if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
@@ -314,9 +315,9 @@ namespace gtsam {
       if (verbosity >= Parameters::DAMPED) damped->print("damped");
       // solve
-      S solver(*damped); // not solver_ !!
-      VectorValues delta = *solver.optimize();
+      solver.reset(new S(*damped));
+      VectorValues delta = *solver->optimize();
       if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
       // update values
@@ -332,8 +333,7 @@ namespace gtsam {
         next_error = error;
         lambda /= factor;
         break;
-      }
-      else {
+      } else {
         // Either we're not cautious, or the same lambda was worse than the current error.
         // The more adventurous lambda was worse too, so make lambda more conservative
         // and keep the same values.
@@ -345,7 +345,8 @@ namespace gtsam {
       }
     } // end while
-    return newValuesErrorLambda_(next_values, next_error, lambda);
+    return NonlinearOptimizer(graph_, next_values, next_error, ordering_, solver,
+        parameters_->newLambda_(lambda), dimensions_, iterations_);
   }
   /* ************************************************************************* */
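
The bug the diff addresses: the old code built the per-lambda solver as a stack-local S solver(*damped), so it was destroyed at the end of each try, and newValuesErrorLambda_ handed the unchanged member solver_ to the optimizer returned from the recursion. The fix threads one shared_solver through the loop and passes the freshest solver into the returned NonlinearOptimizer. Below is a self-contained sketch of that pattern only; Solver, Damped, and trySuccessiveLambdas are placeholder names, not the gtsam API.

#include <boost/shared_ptr.hpp>
#include <iostream>

struct Damped { double lambda; };  // stand-in for the damped linear system

struct Solver {
  double lambda;
  explicit Solver(const Damped& d) : lambda(d.lambda) {}
  double optimize() const { return 1.0 / (1.0 + lambda); }  // toy "delta"
};

typedef boost::shared_ptr<Solver> shared_solver;

// Before the fix the loop declared `Solver solver(damped);` on the stack, so
// it died every iteration; holding a shared_ptr and reset()-ing it per lambda
// lets the last solver outlive the loop and travel out with the result.
shared_solver trySuccessiveLambdas(shared_solver solver_) {
  shared_solver solver = solver_;            // mirrors the added line above
  double lambda = 1e-3;
  for (int i = 0; i < 3; ++i) {
    Damped damped = { lambda };
    solver.reset(new Solver(damped));        // rebuild for this lambda
    std::cout << "delta = " << solver->optimize() << std::endl;
    lambda *= 10.0;                          // make lambda more conservative
  }
  return solver;                             // freshest solver, not the stale one
}

int main() {
  Damped initial = { 0.0 };
  shared_solver s(new Solver(initial));
  s = trySuccessiveLambdas(s);
  std::cout << "final lambda = " << s->lambda << std::endl;
  return 0;
}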


@@ -187,13 +187,13 @@ TEST( NonlinearOptimizer, optimize_LM_recursive )
   Point2 xstar(0,0);
   example::Values cstar;
   cstar.insert(simulated2D::PoseKey(1), xstar);
-  DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
+  EXPECT_DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
   // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
   Point2 x0(3,3);
   boost::shared_ptr<example::Values> c0(new example::Values);
   c0->insert(simulated2D::PoseKey(1), x0);
-  DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
+  EXPECT_DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
   // optimize parameters
   shared_ptr<Ordering> ord(new Ordering());
@@ -207,7 +207,13 @@ TEST( NonlinearOptimizer, optimize_LM_recursive )
   // Levenberg-Marquardt
   Optimizer actual2 = optimizer.levenbergMarquardtRecursive();
-  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+  EXPECT_DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+  // calculate the marginal
+  Matrix actualCovariance; Vector mean;
+  boost::tie(mean, actualCovariance) = actual2.marginalCovariance("x1");
+  Matrix expectedCovariance = Matrix_(2,2, 8.60817108, 0.0, 0.0, 0.01);
+  EXPECT(assert_equal(expectedCovariance, actualCovariance, tol));
 }
 /* ************************************************************************* */
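
The new test lines unpack the marginal with boost::tie, which assigns a 2-element tuple of references from a std::pair. A minimal sketch of that idiom, assuming marginalCovariance returns a pair-like (mean, covariance) bundle; marginal() below is a toy stand-in with scalar values, not the gtsam call:

#include <boost/tuple/tuple.hpp>
#include <iostream>
#include <utility>

// Placeholder for a pair-returning query such as marginalCovariance.
std::pair<double, double> marginal() { return std::make_pair(0.0, 8.6); }

int main() {
  double mean, cov;
  boost::tie(mean, cov) = marginal();  // unpack the pair into locals
  std::cout << "mean = " << mean << ", cov = " << cov << std::endl;
  return 0;
}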