Levenberg-Marquardt changed from a recursive to an iterative implementation to reduce the memory footprint.

release/4.3a0
Chris Beall 2010-11-09 05:58:31 +00:00
parent 86f1b89eda
commit 0bd3617630
2 changed files with 84 additions and 80 deletions
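The memory-footprint rationale is visible in both diffs below: the old try_lambda and levenbergMarquardt recursed, so every rejected lambda and every outer iteration kept another NonlinearOptimizer frame (values, solver, parameters) alive on the call stack, while the new code mutates lambda, values_ and solver_ inside a loop. A minimal toy sketch of that pattern, not the GTSAM API (costAt, dampedStep and the 1-D quadratic cost are made up purely for illustration):

#include <cmath>
#include <iostream>

// Toy 1-D cost used only to keep the sketch runnable: f(x) = (x - 3)^2.
static double costAt(double x) { return (x - 3.0) * (x - 3.0); }

// One damped (Levenberg-Marquardt-style) step for the toy cost:
// gradient g = 2(x - 3), curvature h = 2, damped update x - g / (h + lambda).
static double dampedStep(double x, double lambda) {
  const double g = 2.0 * (x - 3.0);
  const double h = 2.0;
  return x - g / (h + lambda);
}

// Iterative lambda schedule: keep one mutable state and loop, instead of
// building a new optimizer object and recursing whenever a step is rejected.
static double optimizeIterative(double x, double lambda, double factor, int maxIterations) {
  for (int it = 0; it < maxIterations; ++it) {
    const double error = costAt(x);
    const double xNew = dampedStep(x, lambda);
    if (costAt(xNew) <= error) {
      x = xNew;                   // accept the step ...
      lambda /= factor;           // ... and try a more adventurous lambda next time
    } else {
      lambda *= factor;           // reject the step, damp more heavily
      if (lambda > 1e10) break;   // damping has exploded, give up
    }
  }
  return x;
}

int main() {
  std::cout << "x* = " << optimizeIterative(0.0, 1e-3, 10.0, 25) << std::endl;  // tends to 3
  return 0;
}

The same transformation is applied to the outer loop further down: levenbergMarquardt now decrements a local maxIterations counter in a while loop instead of calling itself with newMaxIterations_(maxIterations - 1).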

View File

@@ -185,76 +185,72 @@ namespace gtsam {
}
/* ************************************************************************* */
// Recursively try to do tempered Gauss-Newton steps until we succeed.
// Iteratively try to do tempered Gauss-Newton steps until we succeed.
// Form damped system with given lambda, and return a new, more optimistic
// optimizer if error decreased or recurse with a larger lambda if not.
// optimizer if error decreased or iterate with a larger lambda if not.
// TODO: in theory we can't infinitely recurse, but maybe we should put a max.
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda(const L& linear) const {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda(L& linear) {
const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
const double lambda = parameters_->lambda_ ;
double lambda = parameters_->lambda_ ;
const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ;
const double factor = parameters_->lambdaFactor_ ;
if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
// add prior-factors
L damped = linear.add_priors(1.0/sqrt(lambda), *dimensions_);
if (verbosity >= Parameters::DAMPED) damped.print("damped");
// solve
shared_solver newSolver = solver_;
if(newSolver) newSolver = newSolver->update(damped);
else newSolver.reset(new S(damped));
VectorValues delta = *newSolver->optimize();
if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
// update values
shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
// create new optimization state with more adventurous lambda
NonlinearOptimizer next(newValuesSolverLambda_(newValues, newSolver, lambda / factor));
if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;
if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
if( next.error_ <= error_ ) {
// If we're cautious, see if the current lambda is better
// todo: include stopping criterion here?
if( lambdaMode == Parameters::CAUTIOUS ) {
NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
if(sameLambda.error_ <= next.error_) return sameLambda;
}
bool first_iteration = true;
double next_error = error_;
// Either we're not cautious, or we are but the adventurous lambda is better than the same one.
return next;
shared_values next_values;
} else if (lambda > 1e+10) // if lambda gets too big, something is broken
throw runtime_error("Lambda has grown too large!");
else {
// A more adventurous lambda was worse. If we're cautious, try the same lambda.
if(lambdaMode == Parameters::CAUTIOUS) {
NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
if(sameLambda.error_ <= error_) return sameLambda;
}
while(true) {
if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
// Either we're not cautious, or the same lambda was worse than the current error.
// The more adventurous lambda was worse too, so make lambda more conservative
// and keep the same values.
// add prior-factors
L damped = linear.add_priors(1.0/sqrt(lambda), *dimensions_);
if (verbosity >= Parameters::DAMPED) damped.print("damped");
// TODO: can we avoid copying the values ?
if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
return NonlinearOptimizer(newValuesSolver_(newValues, newSolver));
} else {
NonlinearOptimizer cautious(newLambda_(lambda * factor));
return cautious.try_lambda(linear);
}
}
// solve
if(solver_) solver_ = solver_->update(damped);
else solver_.reset(new S(damped));
VectorValues delta = *solver_->optimize();
if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
// update values
shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
// create new optimization state with more adventurous lambda
//NonlinearOptimizer next(newValuesSolverLambda_(newValues, newSolver, lambda / factor));
double error = graph_->error(*newValues);
if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl;
if(first_iteration || error <= error_) {
next_values = newValues;
first_iteration = false;
}
if( error <= error_ ) {
next_error = error;
lambda /= factor;
break;
}
else {
// Either we're not cautious, or the same lambda was worse than the current error.
// The more adventurous lambda was worse too, so make lambda more conservative
// and keep the same values.
if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
break;
} else {
lambda *= factor;
}
}
} // end while
return newValuesErrorLambda_(next_values, next_error, lambda);
}
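For reference, the add_priors(1.0/sqrt(lambda), *dimensions_) call kept in both versions is what turns the Gauss-Newton step into a Levenberg-Marquardt step. Reading its first argument as the sigma of unit-Jacobian prior factors on every variable (an assumption about this GTSAM revision's API), each prior row enters the system with weight 1/sigma = sqrt(lambda), which is the standard damping:

% Appending rows \sqrt{\lambda} I with zero right-hand side to the linearized
% system A \delta \approx b gives the damped normal equations:
\min_{\delta}\,
\left\|
\begin{bmatrix} A \\ \sqrt{\lambda}\, I \end{bmatrix} \delta
-
\begin{bmatrix} b \\ 0 \end{bmatrix}
\right\|^{2}
\quad\Longleftrightarrow\quad
\bigl(A^{\top} A + \lambda I\bigr)\, \delta = A^{\top} b
% Large \lambda shrinks the step toward scaled gradient descent; small \lambda
% recovers the plain Gauss-Newton step.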
/* ************************************************************************* */
@@ -262,7 +258,7 @@ namespace gtsam {
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM() const {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM(){
const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
const double lambda = parameters_->lambda_ ;
@@ -301,11 +297,10 @@ namespace gtsam {
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt() const {
const int maxIterations = parameters_->maxIterations_ ;
if ( maxIterations <= 0) return *this;
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt() {
int maxIterations = parameters_->maxIterations_ ;
bool converged = false;
const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
// check if we're already close enough
@@ -315,23 +310,28 @@ namespace gtsam {
return *this;
}
// do one iteration of LM
NonlinearOptimizer next = iterateLM();
while (true) {
double previous_error = error_;
// do one iteration of LM
NonlinearOptimizer next = iterateLM();
error_ = next.error_;
values_ = next.values_;
parameters_ = next.parameters_;
// check convergence
// TODO: move convergence checks here and incorporate in verbosity levels
// TODO: build into iterations somehow as an instance variable
bool converged = gtsam::check_convergence(*parameters_, error_, next.error_);
// check convergence
// TODO: move convergence checks here and incorporate in verbosity levels
// TODO: build into iterations somehow as an instance variable
converged = gtsam::check_convergence(*parameters_, previous_error, error_);
// return converged state or iterate
if ( converged || maxIterations <= 1 ) {
if (verbosity >= Parameters::VALUES) next.values_->print("final values");
if (verbosity >= Parameters::ERROR) cout << "final error: " << next.error_ << endl;
if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << next.lambda() << endl;
return next;
} else {
return next.newMaxIterations_(maxIterations - 1).levenbergMarquardt() ;
if(maxIterations <= 0 || converged == true) {
if (verbosity >= Parameters::VALUES) values_->print("final values");
if (verbosity >= Parameters::ERROR) cout << "final error: " << error_ << endl;
if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << lambda() << endl;
return *this;
}
maxIterations--;
}
}
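The loop above delegates the stopping decision to gtsam::check_convergence(*parameters_, previous_error, error_). As a rough guide to what such a test typically looks like (the thresholds and exact logic of the real function may differ; this is an illustrative stand-in, not GTSAM code):

#include <algorithm>
#include <cmath>

// Illustrative absolute/relative error-decrease test for the outer LM loop.
// Not the real gtsam::check_convergence; the tolerances here are placeholders.
static bool checkConvergenceSketch(double previousError, double currentError,
                                   double absoluteTol = 1e-5,
                                   double relativeTol = 1e-5,
                                   double errorTol = 0.0) {
  if (currentError <= errorTol) return true;              // error is already negligible
  const double decrease = previousError - currentError;   // progress made this iteration
  if (decrease < 0.0) return false;                       // error went up: keep iterating
  const double relativeDecrease = decrease / std::max(previousError, 1e-300);
  return decrease < absoluteTol || relativeDecrease < relativeTol;
}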
template<class G, class C, class L, class S, class W>

View File

@@ -86,16 +86,16 @@ namespace gtsam {
// keep a values structure and its error
// These typically change once per iteration (in a functional way)
const shared_values values_;
const double error_;
shared_values values_;
double error_;
// the variable ordering
const shared_ordering ordering_;
// the linear system solver
const shared_solver solver_;
shared_solver solver_;
const shared_parameters parameters_;
shared_parameters parameters_;
// // keep current lambda for use within LM only
// // TODO: red flag, should we have an LM class ?
@@ -107,7 +107,7 @@ namespace gtsam {
// Recursively try to do tempered Gauss-Newton steps until we succeed
// NonlinearOptimizer try_lambda(const L& linear,
// Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const;
NonlinearOptimizer try_lambda(const L& linear) const;
NonlinearOptimizer try_lambda(L& linear);
/**
* Constructor that does not do any computation
@@ -140,6 +140,10 @@ namespace gtsam {
This newValuesSolverLambda_(shared_values newValues, shared_solver newSolver, double newLambda) const {
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, parameters_->newLambda_(newLambda), dimensions_); }
This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const {
return NonlinearOptimizer(graph_, newValues, newError, ordering_, solver_, parameters_->newLambda_(newLambda), dimensions_); }
This newVerbosity_(Parameters::verbosityLevel verbosity) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, parameters_->newVerbosity_(verbosity), dimensions_); }
@@ -249,7 +253,7 @@ namespace gtsam {
*/
// suggested interface
NonlinearOptimizer iterateLM() const;
NonlinearOptimizer iterateLM();
// backward compatible
NonlinearOptimizer iterateLM(Parameters::verbosityLevel verbosity,
@@ -271,7 +275,7 @@ namespace gtsam {
*/
// suggested interface
NonlinearOptimizer levenbergMarquardt() const;
NonlinearOptimizer levenbergMarquardt();
// backward compatible
NonlinearOptimizer