Fixed bug with shared solvers in recursive LM nonlinear optimizer

parent 7404f78bc1
commit 06b08c6f85
@@ -292,60 +292,61 @@ namespace gtsam {
   double next_error = error_;
 
   shared_values next_values = values_;
+  shared_solver solver = solver_;
 
   while(true) {
     if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
 
     // add prior-factors
     typename L::shared_ptr damped(new L(linear));
     {
       double sigma = 1.0 / sqrt(lambda);
       damped->reserve(damped->size() + dimensions_->size());
       // for each of the variables, add a prior
       for(Index j=0; j<dimensions_->size(); ++j) {
         size_t dim = (*dimensions_)[j];
         Matrix A = eye(dim);
         Vector b = zero(dim);
         SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
         GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
         damped->push_back(prior);
       }
     }
     if (verbosity >= Parameters::DAMPED) damped->print("damped");
 
     // solve
-    S solver(*damped); // not solver_ !!
+    solver.reset(new S(*damped));
 
-    VectorValues delta = *solver.optimize();
+    VectorValues delta = *solver->optimize();
     if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
 
     // update values
     shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
 
     // create new optimization state with more adventurous lambda
     double error = graph_->error(*newValues);
 
     if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl;
 
     if( error <= error_ ) {
       next_values = newValues;
       next_error = error;
       lambda /= factor;
       break;
-    }
-    else {
+    } else {
       // Either we're not cautious, or the same lambda was worse than the current error.
       // The more adventurous lambda was worse too, so make lambda more conservative
       // and keep the same values.
       if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
         break;
       } else {
         lambda *= factor;
       }
     }
   } // end while
 
-  return newValuesErrorLambda_(next_values, next_error, lambda);
+  return NonlinearOptimizer(graph_, next_values, next_error, ordering_, solver,
+      parameters_->newLambda_(lambda), dimensions_, iterations_);
 }
 
 /* ************************************************************************* */
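Each unit prior added above has sigma = 1/sqrt(lambda), so it contributes ||dx_j||^2 / (2*sigma^2) = (lambda/2) * ||dx_j||^2 to the damped least-squares objective, which is exactly the Levenberg-Marquardt damping term. The bug itself was one of object lifetime: the old stack-local S solver(*damped) was destroyed at the end of each loop iteration, so the NonlinearOptimizer returned after the loop could not carry the damped solver and kept the stale solver_ instead. Resetting the shared_solver declared before the loop keeps the last damped solver alive across iterations. Below is a minimal standalone sketch of that lifetime pattern, using a hypothetical Solver stand-in rather than the real templated S and shared_solver types:

    #include <boost/shared_ptr.hpp>
    #include <iostream>

    // Hypothetical stand-in for the templated linear solver type S.
    struct Solver {
      explicit Solver(double lambda) : lambda_(lambda) {}
      double lambda_;
    };
    typedef boost::shared_ptr<Solver> shared_solver;

    shared_solver iterate(shared_solver solver_) {
      shared_solver solver = solver_;      // start from the current solver, as in the fix
      double lambda = 1.0;
      while (true) {
        // Before the fix: Solver solver(lambda); -- a local that dies every
        // iteration, so the state returned below would still hold the stale solver_.
        solver.reset(new Solver(lambda));  // after the fix: outlives the loop body
        break;                             // error decreased; accept the step
      }
      return solver;                       // handed to the next optimizer state
    }

    int main() {
      shared_solver next = iterate(shared_solver(new Solver(0.0)));
      std::cout << "solver lambda = " << next->lambda_ << std::endl;  // prints 1
      return 0;
    }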
@@ -187,13 +187,13 @@ TEST( NonlinearOptimizer, optimize_LM_recursive )
   Point2 xstar(0,0);
   example::Values cstar;
   cstar.insert(simulated2D::PoseKey(1), xstar);
-  DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
+  EXPECT_DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
 
   // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
   Point2 x0(3,3);
   boost::shared_ptr<example::Values> c0(new example::Values);
   c0->insert(simulated2D::PoseKey(1), x0);
-  DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
+  EXPECT_DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
 
   // optimize parameters
   shared_ptr<Ordering> ord(new Ordering());
@@ -207,7 +207,13 @@ TEST( NonlinearOptimizer, optimize_LM_recursive )
 
   // Levenberg-Marquardt
   Optimizer actual2 = optimizer.levenbergMarquardtRecursive();
-  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+  EXPECT_DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+
+  // calculate the marginal
+  Matrix actualCovariance; Vector mean;
+  boost::tie(mean, actualCovariance) = actual2.marginalCovariance("x1");
+  Matrix expectedCovariance = Matrix_(2,2, 8.60817108, 0.0, 0.0, 0.01);
+  EXPECT(assert_equal(expectedCovariance, actualCovariance, tol));
 }
 
 /* ************************************************************************* */
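The new marginal check uses the boost::tie idiom to unpack the (mean, covariance) result of marginalCovariance into two existing variables. A minimal sketch of that idiom, assuming the callee returns a std::pair and using scalar stand-ins for the GTSAM Vector and Matrix types:

    #include <boost/tuple/tuple.hpp>
    #include <iostream>
    #include <utility>

    // Hypothetical stand-in returning (mean, covariance) as a std::pair.
    std::pair<double, double> marginal() {
      return std::make_pair(0.0, 8.60817108);
    }

    int main() {
      double mean, covariance;
      boost::tie(mean, covariance) = marginal();  // a 2-tuple of references assigns from a pair
      std::cout << mean << " " << covariance << std::endl;
      return 0;
    }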