Added a functional version of LM optimization for comparison
parent ea2f0e10e1
commit 719e851643
@@ -214,7 +214,7 @@ namespace gtsam {

  /* ************************************************************************* */
  template<class G, class C, class L, class S, class W>
-  NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM(){
+  NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM() {

    const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
    const double lambda = parameters_->lambda_ ;
@@ -236,7 +236,6 @@ namespace gtsam {
  }

  /* ************************************************************************* */
  template<class G, class C, class L, class S, class W>
  NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt() {

@@ -274,6 +273,132 @@ namespace gtsam {
      iterations_++;
    }
  }

+  /* ************************************************************************* */
+  // New version of functional approach
+  /* ************************************************************************* */
+
+  /* ************************************************************************* */
+  template<class G, class C, class L, class S, class W>
+  NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambdaRecursive(const L& linear) const {
+
+    const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
+    double lambda = parameters_->lambda_ ;
+    const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ;
+    const double factor = parameters_->lambdaFactor_ ;
+
+    if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
+
+    double next_error = error_;
+    shared_values next_values = values_;
+
+    while(true) {
+      if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
+
+      // add prior-factors
+      typename L::shared_ptr damped(new L(linear));
+      {
+        double sigma = 1.0 / sqrt(lambda);
+        damped->reserve(damped->size() + dimensions_->size());
+        // for each of the variables, add a prior
+        for(Index j=0; j<dimensions_->size(); ++j) {
+          size_t dim = (*dimensions_)[j];
+          Matrix A = eye(dim);
+          Vector b = zero(dim);
+          SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
+          GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
+          damped->push_back(prior);
+        }
+      }
+      if (verbosity >= Parameters::DAMPED) damped->print("damped");
+
+      // solve
+      S solver(*damped); // not solver_ !!
+
+      VectorValues delta = *solver.optimize();
+      if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
+
+      // update values
+      shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
+
+      // create new optimization state with more adventurous lambda
+      double error = graph_->error(*newValues);
+
+      if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl;
+
+      if( error <= error_ ) {
+        next_values = newValues;
+        next_error = error;
+        lambda /= factor;
+        break;
+      }
+      else {
+        // Either we're not cautious, or the same lambda was worse than the current error.
+        // The more adventurous lambda was worse too, so make lambda more conservative
+        // and keep the same values.
+        if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
+          break;
+        } else {
+          lambda *= factor;
+        }
+      }
+    } // end while
+
+    return newValuesErrorLambda_(next_values, next_error, lambda);
+  }
+
+  /* ************************************************************************* */
+  template<class G, class C, class L, class S, class W>
+  NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLMRecursive() const {
+
+    const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
+    const double lambda = parameters_->lambda_ ;
+
+    // show output
+    if (verbosity >= Parameters::VALUES) values_->print("values");
+    if (verbosity >= Parameters::ERROR) cout << "error: " << error_ << endl;
+    if (verbosity >= Parameters::LAMBDA) cout << "lambda = " << lambda << endl;
+
+    // linearize all factors once
+    boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
+
+    if (verbosity >= Parameters::LINEAR) linear->print("linear");
+
+    // try lambda steps with successively larger lambda until we achieve descent
+    if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl;
+
+    return try_lambdaRecursive(*linear);
+  }
+
+  /* ************************************************************************* */
+  template<class G, class C, class L, class S, class W>
+  NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardtRecursive() const {
+
+    const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
+
+    // check if we're already close enough
+    if (error_ < parameters_->sumError_) {
+      if ( verbosity >= Parameters::ERROR )
+        cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl;
+      return *this;
+    }
+
+    // do one iteration of LM
+    NonlinearOptimizer next = iterateLMRecursive();
+
+    // check convergence
+    // TODO: move convergence checks here and incorporate in verbosity levels
+    bool converged = gtsam::check_convergence(*parameters_, error_, next.error_);
+
+    if(iterations_ >= parameters_->maxIterations_ || converged) {
+      if (verbosity >= Parameters::VALUES) values_->print("final values");
+      if (verbosity >= Parameters::ERROR) cout << "final error: " << error_ << endl;
+      if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << lambda() << endl;
+      return *this;
+    } else {
+      return next.newIterations_(iterations_ + 1).levenbergMarquardtRecursive();
+    }
+  }

}
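The prior factors built in try_lambdaRecursive are the usual Levenberg-Marquardt damping terms: each one has a unit Jacobian, a zero right-hand side, and isotropic noise with sigma = 1/sqrt(lambda). A short sketch of why this reproduces the classical lambda*I augmentation (J and r below denote the stacked Jacobian and residual of the linearized graph; that notation is mine, not the code's):

\[
  \frac{1}{\sigma^{2}}\,\lVert I\,\delta_j - 0\rVert^{2}
  \;=\; \lambda\,\lVert \delta_j\rVert^{2}
  \qquad\text{with } \sigma = 1/\sqrt{\lambda},
\]
so solving the damped linear system amounts to the standard LM step
\[
  \bigl(J^{\top} J + \lambda I\bigr)\,\delta \;=\; -\,J^{\top} r .
\]

Dividing lambda by lambdaFactor_ after a successful step and multiplying it after a failed one moves the step between the Gauss-Newton and gradient-descent regimes.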
@@ -115,16 +115,18 @@ namespace gtsam {
  // error_(error), ordering_(ordering), solver_(solver), lambda_(lambda), dimensions_(dimensions) {}

  NonlinearOptimizer(shared_graph graph, shared_values values, const double error, shared_ordering ordering,
-      shared_solver solver, shared_parameters parameters, shared_dimensions dimensions): graph_(graph), values_(values),
-      error_(error), ordering_(ordering), solver_(solver), parameters_(parameters), dimensions_(dimensions) {}
+      shared_solver solver, shared_parameters parameters, shared_dimensions dimensions, size_t iterations): graph_(graph), values_(values),
+      error_(error), ordering_(ordering), solver_(solver), parameters_(parameters), iterations_(iterations), dimensions_(dimensions) {}

  /** Create a new NonlinearOptimizer with a different lambda */
  This newValuesSolver_(shared_values newValues, shared_solver newSolver) const {
-    return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, parameters_, dimensions_); }
+    return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, parameters_, dimensions_, iterations_); }

  This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const {
-    return NonlinearOptimizer(graph_, newValues, newError, ordering_, solver_, parameters_->newLambda_(newLambda), dimensions_); }
+    return NonlinearOptimizer(graph_, newValues, newError, ordering_, solver_, parameters_->newLambda_(newLambda), dimensions_, iterations_); }
+
+  This newIterations_(int iterations) const {
+    return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, parameters_, dimensions_, iterations); }

  /*
  This newLambda_(double newLambda) const {
@@ -278,6 +280,14 @@ namespace gtsam {
  // suggested interface
  NonlinearOptimizer levenbergMarquardt();

+  /**
+   * The following are const versions of the LM algorithm for comparison and
+   * testing - functions are largely identical, but maintain constness
+   */
+  NonlinearOptimizer try_lambdaRecursive(const L& linear) const;
+  NonlinearOptimizer iterateLMRecursive() const;
+  NonlinearOptimizer levenbergMarquardtRecursive() const;
+
  /**
   * Static interface to LM optimization using default ordering and thresholds
   * @param graph Nonlinear factor graph to optimize
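For reference, a minimal sketch of how the const interface is meant to be exercised. The Optimizer typedef, factor graph fg, initial values c0, ordering ord, and parameters params follow the test fixture added below; anything not shown in the diff is an assumption.

  // The Recursive drivers are const: 'optimizer' itself is never modified,
  // and each call returns a new optimizer holding the updated state.
  Optimizer optimizer(fg, c0, ord, params);
  Optimizer result = optimizer.levenbergMarquardtRecursive();
  cout << "final error: " << fg->error(*result.values()) << endl;
  // The existing non-const driver is called the same way, for comparison:
  // Optimizer looped = optimizer.levenbergMarquardt();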
@@ -177,6 +177,39 @@ TEST( NonlinearOptimizer, optimize )
  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
}

+/* ************************************************************************* */
+TEST( NonlinearOptimizer, optimize_LM_recursive )
+{
+  shared_ptr<example::Graph> fg(new example::Graph(
+      example::createReallyNonlinearFactorGraph()));
+
+  // test error at minimum
+  Point2 xstar(0,0);
+  example::Values cstar;
+  cstar.insert(simulated2D::PoseKey(1), xstar);
+  DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
+
+  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = 100*(1-cos(3)) ~= 199
+  Point2 x0(3,3);
+  boost::shared_ptr<example::Values> c0(new example::Values);
+  c0->insert(simulated2D::PoseKey(1), x0);
+  DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
+
+  // optimize parameters
+  shared_ptr<Ordering> ord(new Ordering());
+  ord->push_back("x1");
+
+  // initial optimization state is the same in both cases tested
+  boost::shared_ptr<NonlinearOptimizationParameters> params = boost::make_shared<NonlinearOptimizationParameters>();
+  params->relDecrease_ = 1e-5;
+  params->absDecrease_ = 1e-5;
+  Optimizer optimizer(fg, c0, ord, params);
+
+  // Levenberg-Marquardt
+  Optimizer actual2 = optimizer.levenbergMarquardtRecursive();
+  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+}
+
/* ************************************************************************* */
TEST( NonlinearOptimizer, SimpleLMOptimizer )
{