New nonlinear optimizer interface: all parameters are pulled out into NonlinearOptimizationParameters. Some redundancy remains for full backward compatibility.

release/4.3a0
Yong-Dian Jian 2010-10-25 22:23:57 +00:00
parent c81f33eb9e
commit 7bfd8b36f4
4 changed files with 435 additions and 215 deletions

View File

@@ -8,6 +8,9 @@
#pragma once
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
namespace gtsam {
// a container for all related parameters
@@ -35,17 +38,62 @@ namespace gtsam {
double relDecrease_; /* threshold for the relative decrease per iteration */
double sumError_; /* threshold for the sum of error */
int maxIterations_ ;
double lambda_ ;
double lambdaFactor_ ;
verbosityLevel verbosity_;
LambdaMode lambdaMode_;
typedef NonlinearOptimizationParameters This;
typedef boost::shared_ptr<NonlinearOptimizationParameters> sharedThis ;
NonlinearOptimizationParameters(): absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
maxIterations_(100), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
maxIterations_(100), lambda_(1e-5), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
NonlinearOptimizationParameters(double absDecrease, double relDecrease, double sumError,
int iIters = 100, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
int iIters = 100, double lambda = 1e-5, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
:absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
maxIterations_(iIters), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
maxIterations_(iIters), lambda_(lambda), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
NonlinearOptimizationParameters(const NonlinearOptimizationParameters &parameters):
absDecrease_(parameters.absDecrease_),
relDecrease_(parameters.relDecrease_),
sumError_(parameters.sumError_),
maxIterations_(parameters.maxIterations_),
lambda_(parameters.lambda_),
lambdaFactor_(parameters.lambdaFactor_),
verbosity_(parameters.verbosity_), lambdaMode_(parameters.lambdaMode_){}
sharedThis newVerbosity_(verbosityLevel verbosity) const {
sharedThis ptr (boost::make_shared<NonlinearOptimizationParameters>(*this)) ;
ptr->verbosity_ = verbosity ;
return ptr ;
}
sharedThis newLambda_(double lambda) const {
sharedThis ptr (boost::make_shared<NonlinearOptimizationParameters>(*this)) ;
ptr->lambda_ = lambda ;
return ptr ;
}
sharedThis newMaxIterations_(int maxIterations) const {
sharedThis ptr (boost::make_shared<NonlinearOptimizationParameters>(*this)) ;
ptr->maxIterations_ = maxIterations ;
return ptr ;
}
static sharedThis newLambda(double lambda) {
sharedThis ptr (boost::make_shared<NonlinearOptimizationParameters>());
ptr->lambda_ = lambda ;
return ptr ;
}
};
}
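The newX_ helpers above are copy-on-write: each clones the shared block and changes a single field, so a block handed to one optimizer is never mutated behind its back. A minimal usage sketch follows; the include path is an assumption, by analogy with <gtsam/nonlinear/NonlinearOptimizer.h> used later in this commit.

// Usage sketch (not part of the commit); the include path is assumed.
#include <boost/make_shared.hpp>
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

int main() {
  typedef gtsam::NonlinearOptimizationParameters Parameters;
  // defaults: absDecrease_ 1, relDecrease_ 1e-3, lambda_ 1e-5, 100 iterations
  Parameters::sharedThis defaults = boost::make_shared<Parameters>();
  // each call clones the block and overrides exactly one field
  Parameters::sharedThis quiet  = defaults->newVerbosity_(Parameters::SILENT);
  Parameters::sharedThis damped = quiet->newLambda_(1e-2);
  Parameters::sharedThis capped = damped->newMaxIterations_(50);
  // static shortcut: defaults with only lambda overridden
  Parameters::sharedThis quick = Parameters::newLambda(1e-3);
  (void) capped; (void) quick;
  return 0;
}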

View File

@@ -32,51 +32,13 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
inline bool check_convergence(
double relativeErrorTreshold,
double absoluteErrorTreshold,
double errorThreshold,
double currentError, double newError, int verbosity) {
if ( verbosity >= 2 ) {
if ( newError <= errorThreshold )
cout << "errorThreshold: " << newError << " < " << errorThreshold << endl;
else
cout << "errorThreshold: " << newError << " > " << errorThreshold << endl;
}
if ( newError <= errorThreshold ) return true ;
// check if diverges
double absoluteDecrease = currentError - newError;
if (verbosity >= 2) {
if (absoluteDecrease < absoluteErrorTreshold)
cout << "absoluteDecrease: " << absoluteDecrease << " < " << absoluteErrorTreshold << endl;
else
cout << "absoluteDecrease: " << absoluteDecrease << " >= " << absoluteErrorTreshold << endl;
}
// calculate relative error decrease
double relativeDecrease = absoluteDecrease / currentError;
if (verbosity >= 2) {
if (relativeDecrease < relativeErrorTreshold)
cout << "relativeDecrease: " << relativeDecrease << " < " << relativeErrorTreshold << endl;
else
cout << "relativeDecrease: " << relativeDecrease << " >= " << relativeErrorTreshold << endl;
}
bool converged = (relativeDecrease < relativeErrorTreshold)
|| (absoluteDecrease < absoluteErrorTreshold);
if (verbosity >= 1 && converged)
cout << "converged" << endl;
return converged;
}
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W>::NonlinearOptimizer(shared_graph graph,
shared_values values, shared_ordering ordering, double lambda) :
graph_(graph), values_(values), error_(graph->error(*values)),
ordering_(ordering), lambda_(lambda), dimensions_(new vector<size_t>(values->dims(*ordering))) {
graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering),
parameters_(Parameters::newLambda(lambda)), dimensions_(new vector<size_t>(values->dims(*ordering))) {
if (!graph) throw std::invalid_argument(
"NonlinearOptimizer constructor: graph = NULL");
if (!values) throw std::invalid_argument(
@@ -89,7 +51,7 @@ namespace gtsam {
NonlinearOptimizer<G, C, L, S, W>::NonlinearOptimizer(
shared_graph graph, shared_values values, shared_ordering ordering, shared_solver solver, const double lambda):
graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering), solver_(solver),
lambda_(lambda), dimensions_(new vector<size_t>(values->dims(*ordering))) {
parameters_(Parameters::newLambda(lambda)), dimensions_(new vector<size_t>(values->dims(*ordering))) {
if (!graph) throw std::invalid_argument(
"NonlinearOptimizer constructor: graph = NULL");
if (!values) throw std::invalid_argument(
@@ -100,8 +62,37 @@ namespace gtsam {
"NonlinearOptimizer constructor: solver = NULL");
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W>::NonlinearOptimizer(shared_graph graph,
shared_values values, shared_ordering ordering, shared_parameters parameters) :
graph_(graph), values_(values), error_(graph->error(*values)),
ordering_(ordering), parameters_(parameters), dimensions_(new vector<size_t>(values->dims(*ordering))) {
if (!graph) throw std::invalid_argument(
"NonlinearOptimizer constructor: graph = NULL");
if (!values) throw std::invalid_argument(
"NonlinearOptimizer constructor: values = NULL");
if (!ordering) throw std::invalid_argument(
"NonlinearOptimizer constructor: ordering = NULL");
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W>::NonlinearOptimizer(
shared_graph graph,
shared_values values,
shared_ordering ordering,
shared_solver solver,
shared_parameters parameters):
graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering), solver_(solver),
parameters_(parameters), dimensions_(new vector<size_t>(values->dims(*ordering))) {
if (!graph) throw std::invalid_argument(
"NonlinearOptimizer constructor: graph = NULL");
if (!values) throw std::invalid_argument(
"NonlinearOptimizer constructor: values = NULL");
if (!ordering) throw std::invalid_argument(
"NonlinearOptimizer constructor: ordering = NULL");
if (!solver) throw std::invalid_argument(
"NonlinearOptimizer constructor: solver = NULL");
}
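Both new constructors above take the parameter block as a shared pointer. A construction sketch, kept graph-agnostic with templates; makeOptimizer and its five template arguments are illustrative, not part of the commit.

// Sketch (not part of the commit): build an optimizer that carries its block.
template <class G, class C, class L, class S, class W>
gtsam::NonlinearOptimizer<G, C, L, S, W> makeOptimizer(
    typename gtsam::NonlinearOptimizer<G, C, L, S, W>::shared_graph graph,
    typename gtsam::NonlinearOptimizer<G, C, L, S, W>::shared_values values,
    typename gtsam::NonlinearOptimizer<G, C, L, S, W>::shared_ordering ordering) {
  typedef gtsam::NonlinearOptimizationParameters Parameters;
  // defaults with tighter initial damping; sharedThis converts implicitly
  // to the optimizer's shared_ptr<const Parameters>
  return gtsam::NonlinearOptimizer<G, C, L, S, W>(
      graph, values, ordering, Parameters::newLambda(1e-6));
}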
/* ************************************************************************* */
// linearize and optimize
@@ -116,71 +107,81 @@ namespace gtsam {
/* ************************************************************************* */
// One iteration of Gauss-Newton
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate(
Parameters::verbosityLevel verbosity) const {
// linearize and optimize
boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
shared_solver newSolver = solver_;
if(newSolver)
newSolver = newSolver->update(*linearized);
else
newSolver.reset(new S(*linearized));
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate() const {
Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
shared_solver newSolver = solver_;
if(newSolver) newSolver = newSolver->update(*linearized);
else newSolver.reset(new S(*linearized));
VectorValues delta = *newSolver->optimize();
// maybe show output
if (verbosity >= Parameters::DELTA)
delta.print("delta");
if (verbosity >= Parameters::DELTA) delta.print("delta");
// take old values and update it
shared_values newValues(new C(values_->expmap(delta, *ordering_)));
// maybe show output
if (verbosity >= Parameters::VALUES)
newValues->print("newValues");
if (verbosity >= Parameters::VALUES) newValues->print("newValues");
NonlinearOptimizer newOptimizer = newValuesSolver_(newValues, newSolver);
if (verbosity >= Parameters::ERROR)
cout << "error: " << newOptimizer.error_ << endl;
if (verbosity >= Parameters::ERROR) cout << "error: " << newOptimizer.error_ << endl;
return newOptimizer;
}
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::gaussNewton(
double relativeThreshold, double absoluteThreshold,
Parameters::verbosityLevel verbosity, int maxIterations) const {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate(
Parameters::verbosityLevel verbosity) const {
return this->newVerbosity_(verbosity).iterate();
}
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::gaussNewton() const {
static W writer(error_);
// check if we're already close enough
if (error_ < absoluteThreshold) {
if (verbosity >= Parameters::ERROR) cout << "Exiting, as error = " << error_
<< " < absoluteThreshold (" << absoluteThreshold << ")" << endl;
if (error_ < parameters_->sumError_ ) {
if ( parameters_->verbosity_ >= Parameters::ERROR)
cout << "Exiting, as error = " << error_
<< " < sumError (" << parameters_->sumError_ << ")" << endl;
return *this;
}
// linearize, solve, update
NonlinearOptimizer next = iterate(verbosity);
NonlinearOptimizer next = iterate();
writer.write(next.error_);
// check convergence
bool converged = gtsam::check_convergence(
relativeThreshold,
absoluteThreshold,
0.0,
error_,
next.error_,
verbosity);
bool converged = gtsam::check_convergence(*parameters_, error_, next.error_);
// return converged state or iterate
if (converged)
return next;
else
return next.gaussNewton(relativeThreshold, absoluteThreshold, verbosity);
// return converged state or iterate
if (converged) return next;
else return next.gaussNewton();
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::gaussNewton(
double relativeThreshold,
double absoluteThreshold,
Parameters::verbosityLevel verbosity,
int maxIterations) const {
Parameters def ;
def.relDecrease_ = relativeThreshold ;
def.absDecrease_ = absoluteThreshold ;
def.verbosity_ = verbosity ;
def.maxIterations_ = maxIterations ;
shared_parameters ptr(boost::make_shared<NonlinearOptimizationParameters>(def)) ;
return newParameters_(ptr).gaussNewton() ;
}
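The legacy overload above is now a thin shim: it packs its four arguments into a Parameters block and re-enters the zero-argument version, which reads thresholds, verbosity, and the iteration cap from parameters_. A call-pattern sketch; solveGaussNewton is illustrative, not part of the commit.

// Sketch (not part of the commit): a parameter-carrying optimizer runs
// Gauss-Newton with no per-call arguments.
template <class Optimizer>
Optimizer solveGaussNewton(const Optimizer& optimizer) {
  return optimizer.gaussNewton();  // recurses until check_convergence fires
}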
/* ************************************************************************* */
@@ -190,61 +191,55 @@ namespace gtsam {
// TODO: in theory we can't infinitely recurse, but maybe we should put a max.
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda(
const L& linear, Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda(const L& linear) const {
if (verbosity >= Parameters::TRYLAMBDA)
cout << "trying lambda = " << lambda_ << endl;
const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
const double lambda = parameters_->lambda_ ;
const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ;
const double factor = parameters_->lambdaFactor_ ;
if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;
// add prior-factors
L damped = linear.add_priors(1.0/sqrt(lambda_), *dimensions_);
if (verbosity >= Parameters::DAMPED)
damped.print("damped");
L damped = linear.add_priors(1.0/sqrt(lambda), *dimensions_);
if (verbosity >= Parameters::DAMPED) damped.print("damped");
// solve
shared_solver newSolver = solver_;
if(newSolver)
newSolver = newSolver->update(damped);
else
newSolver.reset(new S(damped));
if(newSolver) newSolver = newSolver->update(damped);
else newSolver.reset(new S(damped));
VectorValues delta = *newSolver->optimize();
if (verbosity >= Parameters::TRYDELTA)
delta.print("delta");
if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
// update values
shared_values newValues(new C(values_->expmap(delta, *ordering_))); // TODO: updateValues
// if (verbosity >= TRYvalues)
// newValues->print("values");
// create new optimization state with more adventurous lambda
NonlinearOptimizer next(newValuesSolverLambda_(newValues, newSolver, lambda_ / factor));
NonlinearOptimizer next(newValuesSolverLambda_(newValues, newSolver, lambda / factor));
if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;
if(lambdaMode >= Parameters::CAUTIOUS) {
throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
}
if(next.error_ <= error_) {
if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
if( next.error_ <= error_ ) {
// If we're cautious, see if the current lambda is better
// todo: include stopping criterion here?
if(lambdaMode == Parameters::CAUTIOUS) {
if( lambdaMode == Parameters::CAUTIOUS ) {
NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
if(sameLambda.error_ <= next.error_)
return sameLambda;
if(sameLambda.error_ <= next.error_) return sameLambda;
}
// Either we're not cautious, or we are but the adventurous lambda is better than the same one.
return next;
} else if (lambda_ > 1e+10) // if lambda gets too big, something is broken
} else if (lambda > 1e+10) // if lambda gets too big, something is broken
throw runtime_error("Lambda has grown too large!");
else {
// A more adventurous lambda was worse. If we're cautious, try the same lambda.
if(lambdaMode == Parameters::CAUTIOUS) {
NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
if(sameLambda.error_ <= error_)
return sameLambda;
if(sameLambda.error_ <= error_) return sameLambda;
}
// Either we're not cautious, or the same lambda was worse than the current error.
@@ -252,96 +247,119 @@ namespace gtsam {
// and keep the same values.
// TODO: can we avoid copying the values ?
if(lambdaMode >= Parameters::BOUNDED && lambda_ >= 1.0e5) {
if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
return NonlinearOptimizer(newValuesSolver_(newValues, newSolver));
} else {
NonlinearOptimizer cautious(newLambda_(lambda_ * factor));
return cautious.try_lambda(linear, verbosity, factor, lambdaMode);
NonlinearOptimizer cautious(newLambda_(lambda * factor));
return cautious.try_lambda(linear);
}
}
}
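To make the recursion's schedule concrete, here is a small standalone trace (not part of the commit) of the failure path, using the parameter defaults (lambda 1e-5, factor 10) and the two thresholds hard-coded above.

#include <iostream>
#include <stdexcept>

int main() {
  double lambda = 1e-5;        // NonlinearOptimizationParameters default
  const double factor = 10.0;  // lambdaFactor_ default
  while (lambda < 1.0e5) {     // the BOUNDED cut-off in try_lambda
    std::cout << "trying lambda = " << lambda << std::endl;
    lambda *= factor;          // step rejected: retry with more damping
  }
  if (lambda > 1e+10)          // the hard failure threshold
    throw std::runtime_error("Lambda has grown too large!");
  return 0;
}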
/* ************************************************************************* */
// One iteration of Levenberg-Marquardt
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM(
Parameters::verbosityLevel verbosity, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM() const {
const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
const double lambda = parameters_->lambda_ ;
// show output
if (verbosity >= Parameters::VALUES)
values_->print("values");
if (verbosity >= Parameters::ERROR)
cout << "error: " << error_ << endl;
if (verbosity >= Parameters::LAMBDA)
cout << "lambda = " << lambda_ << endl;
if (verbosity >= Parameters::VALUES) values_->print("values");
if (verbosity >= Parameters::ERROR) cout << "error: " << error_ << endl;
if (verbosity >= Parameters::LAMBDA) cout << "lambda = " << lambda << endl;
// linearize all factors once
boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_);
if (verbosity >= Parameters::LINEAR)
linear->print("linear");
if (verbosity >= Parameters::LINEAR) linear->print("linear");
// try lambda steps with successively larger lambda until we achieve descent
if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl;
return try_lambda(*linear, verbosity, lambdaFactor, lambdaMode);
return try_lambda(*linear);
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM(
Parameters::verbosityLevel verbosity,
double lambdaFactor,
Parameters::LambdaMode lambdaMode) const {
NonlinearOptimizationParameters def(*parameters_) ;
def.verbosity_ = verbosity ;
def.lambdaFactor_ = lambdaFactor ;
def.lambdaMode_ = lambdaMode ;
shared_parameters ptr(boost::make_shared<Parameters>(def)) ;
return newParameters_(ptr).iterateLM();
}
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt(
double relativeThreshold, double absoluteThreshold,
Parameters::verbosityLevel verbosity, int maxIterations, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {
return levenbergMarquardt(NonlinearOptimizationParameters (absoluteThreshold, relativeThreshold, absoluteThreshold,
maxIterations, lambdaFactor, verbosity, lambdaMode)) ;
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::
levenbergMarquardt(const NonlinearOptimizationParameters &para) const {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt() const {
if (para.maxIterations_ <= 0) return *this;
const int maxIterations = parameters_->maxIterations_ ;
if ( maxIterations <= 0) return *this;
const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
// check if we're already close enough
if (error_ < para.sumError_) {
if (para.verbosity_ >= Parameters::ERROR)
cout << "Exiting, as error = " << error_ << " < " << para.sumError_ << endl;
if (error_ < parameters_->sumError_) {
if ( verbosity >= Parameters::ERROR )
cout << "Exiting, as error = " << error_ << " < sumError (" << parameters_->sumError_ << ")" << endl;
return *this;
}
// do one iteration of LM
NonlinearOptimizer next = iterateLM(para.verbosity_, para.lambdaFactor_, para.lambdaMode_);
NonlinearOptimizer next = iterateLM();
// check convergence
// TODO: move convergence checks here and incorporate in verbosity levels
// TODO: build into iterations somehow as an instance variable
bool converged = gtsam::check_convergence(
para.relDecrease_,
para.absDecrease_,
para.sumError_,
error_,
next.error_,
para.verbosity_);
bool converged = gtsam::check_convergence(*parameters_, error_, next.error_);
// return converged state or iterate
if (converged || para.maxIterations_ <= 1) {
// maybe show output
if (para.verbosity_ >= Parameters::VALUES)
next.values_->print("final values");
if (para.verbosity_ >= Parameters::ERROR)
cout << "final error: " << next.error_ << endl;
if (para.verbosity_ >= Parameters::LAMBDA)
cout << "final lambda = " << next.lambda_ << endl;
if ( converged || maxIterations <= 1 ) {
if (verbosity >= Parameters::VALUES) next.values_->print("final values");
if (verbosity >= Parameters::ERROR) cout << "final error: " << next.error_ << endl;
if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << next.lambda() << endl;
return next;
} else {
NonlinearOptimizationParameters newPara = para ;
newPara.maxIterations_ = newPara.maxIterations_ - 1;
return next.levenbergMarquardt(newPara) ;
return next.newMaxIterations_(maxIterations - 1).levenbergMarquardt() ;
}
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt(
double relativeThreshold,
double absoluteThreshold,
Parameters::verbosityLevel verbosity,
int maxIterations,
double lambdaFactor,
Parameters::LambdaMode lambdaMode) const {
NonlinearOptimizationParameters def;
def.relDecrease_ = relativeThreshold ;
def.absDecrease_ = absoluteThreshold ;
def.verbosity_ = verbosity ;
def.maxIterations_ = maxIterations ;
def.lambdaFactor_ = lambdaFactor ;
def.lambdaMode_ = lambdaMode ;
shared_parameters ptr = boost::make_shared<Parameters>(def) ;
return newParameters_(ptr).levenbergMarquardt() ;
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::
levenbergMarquardt(const NonlinearOptimizationParameters &parameters) const {
return newParameters_(boost::make_shared<Parameters>(parameters)).levenbergMarquardt() ;
}
/* ************************************************************************* */
}
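Taken together, an end-to-end Levenberg-Marquardt run through the suggested interface looks like the sketch below; runLM and its template arguments are illustrative, and graph, values, and ordering are assumed already built.

// Sketch (not part of the commit): one zero-argument LM run.
#include <iostream>
#include <boost/make_shared.hpp>

template <class G, class C, class L, class S, class W>
void runLM(typename gtsam::NonlinearOptimizer<G, C, L, S, W>::shared_graph graph,
           typename gtsam::NonlinearOptimizer<G, C, L, S, W>::shared_values values,
           typename gtsam::NonlinearOptimizer<G, C, L, S, W>::shared_ordering ordering) {
  typedef gtsam::NonlinearOptimizer<G, C, L, S, W> Optimizer;
  typedef gtsam::NonlinearOptimizationParameters Parameters;
  Parameters settings;
  settings.maxIterations_ = 50;              // cap the recursion depth
  settings.verbosity_ = Parameters::LAMBDA;  // print lambda each iteration
  Optimizer optimizer(graph, values, ordering,
                      boost::make_shared<Parameters>(settings));
  Optimizer result = optimizer.levenbergMarquardt();
  std::cout << "final error " << result.error()
            << ", final lambda " << result.lambda() << std::endl;
}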

View File

@@ -16,9 +16,62 @@
* Created on: Jul 17, 2010
*/
#include <iostream>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
using namespace std;
namespace gtsam {
bool check_convergence (
const NonlinearOptimizationParameters &parameters,
double currentError, double newError) {
return check_convergence(parameters.relDecrease_,
parameters.absDecrease_,
parameters.sumError_,
currentError, newError,
parameters.verbosity_) ;
}
bool check_convergence(
double relativeErrorTreshold,
double absoluteErrorTreshold,
double errorThreshold,
double currentError, double newError, int verbosity) {
if ( verbosity >= 2 ) {
if ( newError <= errorThreshold )
cout << "errorThreshold: " << newError << " < " << errorThreshold << endl;
else
cout << "errorThreshold: " << newError << " > " << errorThreshold << endl;
}
if ( newError <= errorThreshold ) return true ;
// check if diverges
double absoluteDecrease = currentError - newError;
if (verbosity >= 2) {
if (absoluteDecrease < absoluteErrorTreshold)
cout << "absoluteDecrease: " << absoluteDecrease << " < " << absoluteErrorTreshold << endl;
else
cout << "absoluteDecrease: " << absoluteDecrease << " >= " << absoluteErrorTreshold << endl;
}
// calculate relative error decrease
double relativeDecrease = absoluteDecrease / currentError;
if (verbosity >= 2) {
if (relativeDecrease < relativeErrorTreshold)
cout << "relativeDecrease: " << relativeDecrease << " < " << relativeErrorTreshold << endl;
else
cout << "relativeDecrease: " << relativeDecrease << " >= " << relativeErrorTreshold << endl;
}
bool converged = (relativeDecrease < relativeErrorTreshold)
|| (absoluteDecrease < absoluteErrorTreshold);
if (verbosity >= 1 && converged)
cout << "converged" << endl;
return converged;
}
}
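A worked reading of the rule, as a sketch (not part of the commit) with sample numbers: a step from error 10 to 9.5 converges on the absolute criterion, while a step from 10 to 5 trips neither threshold.

#include <iostream>
#include <gtsam/nonlinear/NonlinearOptimizer.h>  // declares check_convergence

int main() {
  // args: rel 1e-3, abs 1.0, errorThreshold 0, current, new, verbosity 0
  // 10 -> 9.5: absoluteDecrease 0.5 < 1.0, so this counts as converged
  bool small_step = gtsam::check_convergence(1e-3, 1.0, 0.0, 10.0, 9.5, 0);
  // 10 -> 5: decrease 5 >= 1 and relative 0.5 >= 1e-3, so keep iterating
  bool big_step = gtsam::check_convergence(1e-3, 1.0, 0.0, 10.0, 5.0, 0);
  std::cout << small_step << " " << big_step << std::endl;  // prints "1 0"
  return 0;
}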

View File

@@ -73,6 +73,7 @@ namespace gtsam {
typedef boost::shared_ptr<const Ordering> shared_ordering;
typedef boost::shared_ptr<const GS> shared_solver;
typedef NonlinearOptimizationParameters Parameters;
typedef boost::shared_ptr<const Parameters> shared_parameters ;
private:
@@ -94,76 +95,116 @@ namespace gtsam {
// the linear system solver
const shared_solver solver_;
// keep current lambda for use within LM only
// TODO: red flag, should we have an LM class ?
const double lambda_;
const shared_parameters parameters_;
// // keep current lambda for use within LM only
// // TODO: red flag, should we have an LM class ?
// const double lambda_;
// The dimensions of each linearized variable
const shared_dimensions dimensions_;
// Recursively try to do tempered Gauss-Newton steps until we succeed
NonlinearOptimizer try_lambda(const L& linear,
Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const;
// NonlinearOptimizer try_lambda(const L& linear,
// Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const;
NonlinearOptimizer try_lambda(const L& linear) const;
/**
* Constructor that does not do any computation
*/
NonlinearOptimizer(shared_graph graph, shared_values values, const double error, shared_ordering ordering,
shared_solver solver, const double lambda, shared_dimensions dimensions): graph_(graph), values_(values),
error_(error), ordering_(ordering), solver_(solver), lambda_(lambda), dimensions_(dimensions) {}
// NonlinearOptimizer(shared_graph graph, shared_values values, const double error, shared_ordering ordering,
// shared_solver solver, const double lambda, shared_dimensions dimensions): graph_(graph), values_(values),
// error_(error), ordering_(ordering), solver_(solver), lambda_(lambda), dimensions_(dimensions) {}
NonlinearOptimizer(shared_graph graph, shared_values values, const double error, shared_ordering ordering,
shared_solver solver, shared_parameters parameters, shared_dimensions dimensions): graph_(graph), values_(values),
error_(error), ordering_(ordering), solver_(solver), parameters_(parameters), dimensions_(dimensions) {}
/** Create a new NonlinearOptimizer with a different lambda */
// This newLambda_(double newLambda) const {
// return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, newLambda, dimensions_); }
//
// This newValuesSolver_(shared_values newValues, shared_solver newSolver) const {
// return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, lambda_, dimensions_); }
//
// This newValuesSolverLambda_(shared_values newValues, shared_solver newSolver, double newLambda) const {
// return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, newLambda, dimensions_); }
/** Create a new NonlinearOptimizer with a different lambda */
This newLambda_(double newLambda) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, newLambda, dimensions_); }
return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, parameters_->newLambda_(newLambda), dimensions_); }
This newValuesSolver_(shared_values newValues, shared_solver newSolver) const {
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, lambda_, dimensions_); }
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, parameters_, dimensions_); }
This newValuesSolverLambda_(shared_values newValues, shared_solver newSolver, double newLambda) const {
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, newLambda, dimensions_); }
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, parameters_->newLambda_(newLambda), dimensions_); }
This newVerbosity_(Parameters::verbosityLevel verbosity) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, parameters_->newVerbosity_(verbosity), dimensions_); }
This newParameters_(shared_parameters parameters) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, parameters, dimensions_); }
This newMaxIterations_(int maxIterations) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, parameters_->newMaxIterations_(maxIterations), dimensions_); }
public:
/**
* Constructor that evaluates new error
*/
NonlinearOptimizer(shared_graph graph, shared_values values, shared_ordering ordering,
const double lambda = 1e-5);
// backward compatibility
NonlinearOptimizer(shared_graph graph,
shared_values values,
shared_ordering ordering,
const double lambda);
// backward compatibility
NonlinearOptimizer(shared_graph graph,
shared_values values,
shared_ordering ordering,
shared_solver solver,
const double lambda = 1e-5);
const double lambda);
// suggested constructors
NonlinearOptimizer(shared_graph graph,
shared_values values,
shared_ordering ordering,
shared_parameters parameters = boost::make_shared<Parameters>());
// suggested constructors
NonlinearOptimizer(shared_graph graph,
shared_values values,
shared_ordering ordering,
shared_solver solver,
shared_parameters parameters = boost::make_shared<Parameters>());
/**
* Copy constructor
*/
// NonlinearOptimizer(const NonlinearOptimizer<G, T, L, GS> &optimizer) :
// graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
// ordering_(optimizer.ordering_), solver_(optimizer.solver_), lambda_(optimizer.lambda_), dimensions_(optimizer.dimensions_) {}
NonlinearOptimizer(const NonlinearOptimizer<G, T, L, GS> &optimizer) :
graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
ordering_(optimizer.ordering_), solver_(optimizer.solver_), lambda_(optimizer.lambda_), dimensions_(optimizer.dimensions_) {}
ordering_(optimizer.ordering_), solver_(optimizer.solver_), parameters_(optimizer.parameters_), dimensions_(optimizer.dimensions_) {}
/**
* Return current error
*/
double error() const {
return error_;
}
double error() const { return error_; }
/**
* Return current lambda
*/
double lambda() const {
return lambda_;
}
double lambda() const { return parameters_->lambda_; }
/**
* Return the values
*/
shared_values values() const{
return values_;
}
shared_values values() const{ return values_; }
/**
* Return mean and covariance on a single variable
@@ -181,7 +222,12 @@ namespace gtsam {
/**
* Do one Gauss-Newton iteration and return next state
*/
NonlinearOptimizer iterate(Parameters::verbosityLevel verbosity = Parameters::SILENT) const;
// suggested interface
NonlinearOptimizer iterate() const;
// backward compatible
NonlinearOptimizer iterate(Parameters::verbosityLevel verbosity) const;
/**
* Optimize a solution for a nonlinear factor graph
@@ -189,6 +235,11 @@ namespace gtsam {
* @param absoluteThreshold
* @param verbosity Integer specifying how much output to provide
*/
// suggested interface
NonlinearOptimizer gaussNewton() const;
// backward compatible
NonlinearOptimizer
gaussNewton(double relativeThreshold, double absoluteThreshold,
Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100) const;
@@ -196,7 +247,12 @@ namespace gtsam {
/**
* One iteration of Levenberg Marquardt
*/
NonlinearOptimizer iterateLM(Parameters::verbosityLevel verbosity = Parameters::SILENT,
// suggested interface
NonlinearOptimizer iterateLM() const;
// backward compatible
NonlinearOptimizer iterateLM(Parameters::verbosityLevel verbosity,
double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
/**
@@ -213,12 +269,20 @@ namespace gtsam {
* @param verbosity Integer specifying how much output to provide
* @param lambdaFactor Factor by which to decrease/increase lambda
*/
// suggested interface
NonlinearOptimizer levenbergMarquardt() const;
// backward compatible
NonlinearOptimizer
levenbergMarquardt(double relativeThreshold, double absoluteThreshold,
Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100,
double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
levenbergMarquardt(double relativeThreshold,
double absoluteThreshold,
Parameters::verbosityLevel verbosity = Parameters::SILENT,
int maxIterations = 100,
double lambdaFactor = 10,
Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
// backward compatible
NonlinearOptimizer
levenbergMarquardt(const NonlinearOptimizationParameters &para) const;
@@ -229,31 +293,47 @@ namespace gtsam {
* @param verbosity Integer specifying how much output to provide
* @return an optimized values structure
*/
static shared_values optimizeLM(shared_graph graph, shared_values values,
Parameters::verbosityLevel verbosity = Parameters::SILENT) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
static shared_values optimizeLM(shared_graph graph,
shared_values values,
shared_parameters parameters = boost::make_shared<Parameters>()) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
// initial optimization state is the same in both cases tested
GS solver(*graph->linearize(*values, *ordering));
NonlinearOptimizer optimizer(graph, values, ordering);
// Levenberg-Marquardt
NonlinearOptimizer result = optimizer.levenbergMarquardt(relativeThreshold,
absoluteThreshold, verbosity);
NonlinearOptimizer optimizer(graph, values, ordering, parameters);
NonlinearOptimizer result = optimizer.levenbergMarquardt();
return result.values();
}
static shared_values optimizeLM(shared_graph graph,
shared_values values,
Parameters::verbosityLevel verbosity = Parameters::SILENT)
{
Parameters def ;
shared_parameters parameters = def.newVerbosity_(verbosity);
return optimizeLM(graph, values, parameters);
}
/**
* Static interface to LM optimization (no shared_ptr arguments) - see above
*/
inline static shared_values optimizeLM(const G& graph, const T& values,
Parameters::verbosityLevel verbosity = Parameters::SILENT) {
static shared_values optimizeLM(const G& graph,
const T& values,
const Parameters parameters) {
return optimizeLM(boost::make_shared<const G>(graph),
boost::make_shared<const T>(values), verbosity);
boost::make_shared<const T>(values),
boost::make_shared<Parameters>(parameters));
}
static shared_values optimizeLM(const G& graph,
const T& values,
Parameters::verbosityLevel verbosity = Parameters::SILENT) {
return optimizeLM(boost::make_shared<const G>(graph),
boost::make_shared<const T>(values),
verbosity);
}
/**
@@ -263,28 +343,46 @@ namespace gtsam {
* @param verbosity Integer specifying how much output to provide
* @return an optimized values structure
*/
static shared_values optimizeGN(shared_graph graph, shared_values values,
Parameters::verbosityLevel verbosity = Parameters::SILENT) {
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
// suggested interface
static shared_values optimizeGN(shared_graph graph,
shared_values values,
shared_parameters parameters) {
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
// initial optimization state is the same in both cases tested
GS solver(*graph->linearize(*values, *ordering));
NonlinearOptimizer optimizer(graph, values, ordering);
// Gauss-Newton
NonlinearOptimizer result = optimizer.gaussNewton(relativeThreshold,
absoluteThreshold, verbosity);
NonlinearOptimizer optimizer(graph, values, ordering, parameters);
NonlinearOptimizer result = optimizer.gaussNewton();
return result.values();
}
// backward compatible
static shared_values optimizeGN(shared_graph graph,
shared_values values,
Parameters::verbosityLevel verbosity = Parameters::SILENT) {
Parameters def ;
shared_parameters parameters = def.newVerbosity_(verbosity);
return optimizeGN(graph, values, parameters);
}
/**
* Static interface to GN optimization (no shared_ptr arguments) - see above
*/
inline static shared_values optimizeGN(const G& graph, const T& values,
Parameters::verbosityLevel verbosity = Parameters::SILENT) {
// suggested interface
static shared_values optimizeGN(const G& graph, const T& values, const Parameters parameters = Parameters()) {
return optimizeGN(boost::make_shared<const G>(graph),
boost::make_shared<const T>(values), verbosity);
boost::make_shared<const T>(values),
boost::make_shared<Parameters>(parameters));
}
// backward compatible
static shared_values optimizeGN(const G& graph, const T& values, Parameters::verbosityLevel verbosity = Parameters::SILENT) {
return optimizeGN(boost::make_shared<const G>(graph),
boost::make_shared<const T>(values),
verbosity);
}
};
@@ -298,6 +396,9 @@ namespace gtsam {
double errorThreshold,
double currentError, double newError, int verbosity);
bool check_convergence (
const NonlinearOptimizationParameters &parameters,
double currentError, double newError);
} // gtsam
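Finally, the static entry points after this commit, side by side in one sketch; demoStatic and its template arguments are illustrative placeholders for concrete graph and values types.

// Sketch (not part of the commit): legacy vs. suggested static interface.
template <class G, class C, class L, class S, class W>
void demoStatic(const G& graph, const C& initial) {
  typedef gtsam::NonlinearOptimizer<G, C, L, S, W> Optimizer;
  typedef gtsam::NonlinearOptimizationParameters Parameters;
  // legacy: only verbosity is tunable
  typename Optimizer::shared_values v1 =
      Optimizer::optimizeLM(graph, initial, Parameters::SILENT);
  // suggested: the whole parameter block rides along in one argument
  Parameters settings;
  settings.lambda_ = 1e-4;       // stronger initial damping than the default
  settings.maxIterations_ = 50;
  typename Optimizer::shared_values v2 =
      Optimizer::optimizeLM(graph, initial, settings);
  (void) v1; (void) v2;
}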