(in branch) NonlinearOptimizer base class

release/4.3a0
Richard Roberts 2012-02-26 02:44:58 +00:00
parent ba5bb0298d
commit 0309b6b184
6 changed files with 222 additions and 883 deletions

View File

@ -1,134 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file NonlinearOptimization-inl.h
* @date Oct 17, 2010
* @author Kai Ni
* @brief Easy interfaces for NonlinearOptimizer
*/
#pragma once
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianMultifrontalSolver.h>
#if ENABLE_SPCG
#include <gtsam/linear/SubgraphSolver.h>
#endif
#include <gtsam/nonlinear/NonlinearOptimizer.h>
using namespace std;
namespace gtsam {
/**
* The Elimination solver
*/
template<class G>
Values optimizeSequential(const G& graph, const Values& initialEstimate,
const NonlinearOptimizationParameters& parameters, bool useLM) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate);
// Create a nonlinear Optimizer that uses a Sequential Solver
typedef NonlinearOptimizer<G, GaussianFactorGraph, GaussianSequentialSolver> Optimizer;
Optimizer optimizer(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(initialEstimate), ordering,
boost::make_shared<NonlinearOptimizationParameters>(parameters));
// Now optimize using either LM or GN methods.
if (useLM)
return *optimizer.levenbergMarquardt().values();
else
return *optimizer.gaussNewton().values();
}
/**
* The multifrontal solver
*/
template<class G>
Values optimizeMultiFrontal(const G& graph, const Values& initialEstimate,
const NonlinearOptimizationParameters& parameters, bool useLM) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate);
// Create a nonlinear Optimizer that uses a Multifrontal Solver
typedef NonlinearOptimizer<G, GaussianFactorGraph, GaussianMultifrontalSolver> Optimizer;
Optimizer optimizer(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(initialEstimate), ordering,
boost::make_shared<NonlinearOptimizationParameters>(parameters));
// now optimize using either LM or GN methods
if (useLM)
return *optimizer.levenbergMarquardt().values();
else
return *optimizer.gaussNewton().values();
}
#if ENABLE_SPCG
/**
* The sparse preconditioned conjugate gradient solver
*/
template<class G>
Values optimizeSPCG(const G& graph, const Values& initialEstimate,
const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(),
bool useLM = true) {
// initial optimization state is the same in both cases tested
typedef SubgraphSolver<G,GaussianFactorGraph,Values> Solver;
typedef boost::shared_ptr<Solver> shared_Solver;
typedef NonlinearOptimizer<G, GaussianFactorGraph, Solver> SPCGOptimizer;
shared_Solver solver = boost::make_shared<Solver>(
graph, initialEstimate, IterativeOptimizationParameters());
SPCGOptimizer optimizer(
boost::make_shared<const G>(graph),
boost::make_shared<const Values>(initialEstimate),
solver->ordering(),
solver,
boost::make_shared<NonlinearOptimizationParameters>(parameters));
// choose nonlinear optimization method
if (useLM)
return *optimizer.levenbergMarquardt().values();
else
return *optimizer.gaussNewton().values();
}
#endif
/**
* optimization that returns the values
*/
template<class G>
Values optimize(const G& graph, const Values& initialEstimate, const NonlinearOptimizationParameters& parameters,
const LinearSolver& solver,
const NonlinearOptimizationMethod& nonlinear_method) {
switch (solver) {
case SEQUENTIAL:
return optimizeSequential<G>(graph, initialEstimate, parameters,
nonlinear_method == LM);
case MULTIFRONTAL:
return optimizeMultiFrontal<G>(graph, initialEstimate, parameters,
nonlinear_method == LM);
#if ENABLE_SPCG
case SPCG:
// return optimizeSPCG<G,Values>(graph, initialEstimate, parameters,
// nonlinear_method == LM);
throw runtime_error("optimize: SPCG not supported yet due to the specific pose constraint");
#endif
}
throw runtime_error("optimize: undefined solver");
}
} //namespace gtsam

View File

@ -1,55 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file NonlinearOptimization.h
* @date Oct 14, 2010
* @author Kai Ni
* @brief Easy interfaces for NonlinearOptimizer
*/
#pragma once
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>
namespace gtsam {
/**
* all the nonlinear optimization methods
*/
typedef enum {
LM, // Levenberg Marquardt
GN // Gauss-Newton
} NonlinearOptimizationMethod;
/**
* all the linear solver types
*/
typedef enum {
SEQUENTIAL, // Sequential elimination
MULTIFRONTAL, // Multi-frontal elimination
#if ENABLE_SPCG
SPCG, // Subgraph Preconditioned Conjugate Gradient
#endif
} LinearSolver;
/**
* optimization that returns the values
*/
template<class G>
Values optimize(const G& graph, const Values& initialEstimate,
const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(),
const LinearSolver& solver = MULTIFRONTAL,
const NonlinearOptimizationMethod& nonlinear_method = LM);
}
#include <gtsam/nonlinear/NonlinearOptimization-inl.h>
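
For context, a minimal sketch of how a caller would have used the convenience interface this commit removes; MyGraph is a placeholder for any graph type providing orderingCOLAMD() and linearize() as the templates above require, and is not part of this change:

// Sketch only: MyGraph is an assumed placeholder graph type.
#include <gtsam/nonlinear/NonlinearOptimization.h>

gtsam::Values solveSketch(const MyGraph& graph, const gtsam::Values& initial) {
  using namespace gtsam;
  // Custom parameters: cap the iterations, leave the other thresholds at their defaults
  NonlinearOptimizationParameters params;
  params.maxIterations_ = 50;
  // Sequential elimination with Gauss-Newton; omitting the last three arguments
  // gives the defaults (multifrontal elimination, Levenberg-Marquardt).
  return optimize(graph, initial, params, SEQUENTIAL, GN);
}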

View File

@ -1,121 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file NonlinearOptimizationParameters.cpp
* @date Jan 28, 2012
* @author Alex Cunningham
* @brief Implements parameters structure
*/
#include <iostream>
#include <boost/make_shared.hpp>
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>
namespace gtsam {
using namespace std;
/* ************************************************************************* */
NonlinearOptimizationParameters::NonlinearOptimizationParameters() :
absDecrease_(1e-6), relDecrease_(1e-6), sumError_(0.0), maxIterations_(
100), lambda_(1e-5), lambdaFactor_(10.0), verbosity_(SILENT), lambdaMode_(
BOUNDED), useQR_(false) {
}
/* ************************************************************************* */
NonlinearOptimizationParameters::NonlinearOptimizationParameters(double absDecrease, double relDecrease,
double sumError, int iIters, double lambda,
double lambdaFactor, verbosityLevel v,
LambdaMode lambdaMode, bool useQR) :
absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(
sumError), maxIterations_(iIters), lambda_(lambda), lambdaFactor_(
lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode), useQR_(useQR) {
}
/* ************************************************************************* */
NonlinearOptimizationParameters::NonlinearOptimizationParameters(
const NonlinearOptimizationParameters &parameters) :
absDecrease_(parameters.absDecrease_), relDecrease_(
parameters.relDecrease_), sumError_(parameters.sumError_), maxIterations_(
parameters.maxIterations_), lambda_(parameters.lambda_), lambdaFactor_(
parameters.lambdaFactor_), verbosity_(parameters.verbosity_), lambdaMode_(
parameters.lambdaMode_), useQR_(parameters.useQR_) {
}
/* ************************************************************************* */
void NonlinearOptimizationParameters::print(const std::string& s) const {
cout << "NonlinearOptimizationParameters " << s << endl;
cout << "absolute decrease threshold: " << absDecrease_ << endl;
cout << "relative decrease threshold: " << relDecrease_ << endl;
cout << " error sum threshold: " << sumError_ << endl;
cout << " maximum nr. of iterations: " << maxIterations_ << endl;
cout << " initial lambda value: " << lambda_ << endl;
cout << " factor to multiply lambda: " << lambdaFactor_ << endl;
cout << " verbosity level: " << verbosity_ << endl;
cout << " lambda mode: " << lambdaMode_ << endl;
cout << " use QR: " << useQR_ << endl;
}
/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newLambda_(double lambda) const {
shared_ptr ptr(
boost::make_shared < NonlinearOptimizationParameters > (*this));
ptr->lambda_ = lambda;
return ptr;
}
/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newVerbosity(verbosityLevel verbosity) {
shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
ptr->verbosity_ = verbosity;
return ptr;
}
/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newMaxIterations(int maxIterations) {
shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
ptr->maxIterations_ = maxIterations;
return ptr;
}
/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newLambda(double lambda) {
shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
ptr->lambda_ = lambda;
return ptr;
}
/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newDecreaseThresholds(double absDecrease,
double relDecrease) {
shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
ptr->absDecrease_ = absDecrease;
ptr->relDecrease_ = relDecrease;
return ptr;
}
/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newFactorization(bool useQR) {
shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
ptr->useQR_ = useQR;
return ptr;
}
} // \namespace gtsam
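
One detail worth noting in these helpers: the static newVerbosity/newMaxIterations/newLambda/newDecreaseThresholds/newFactorization functions start from a default-constructed parameter set and override a single field, while the newLambda_ member copies the current instance. A hedged usage sketch:

// Sketch only: composing parameter objects with the helpers defined above.
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

void parameterSketch() {
  using gtsam::NonlinearOptimizationParameters;
  // Static helper: defaults everywhere except the verbosity
  NonlinearOptimizationParameters::shared_ptr verbose =
      NonlinearOptimizationParameters::newVerbosity(NonlinearOptimizationParameters::LAMBDA);
  // Member helper: copy an existing parameter set, changing only the starting lambda
  NonlinearOptimizationParameters base;
  NonlinearOptimizationParameters::shared_ptr damped = base.newLambda_(1e-2);
  damped->print("damped parameters");
}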

View File

@ -1,138 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file NonlinearOptimizationParameters.h
* @date Oct 14, 2010
* @author Kai Ni
* @brief structure to store parameters for nonlinear optimization
*/
#pragma once
#include <string>
#include <boost/shared_ptr.hpp>
#include <boost/serialization/nvp.hpp>
namespace gtsam {
/**
* a container for all related parameters
* \nosubgrouping
*/
struct NonlinearOptimizationParameters {
typedef boost::shared_ptr<NonlinearOptimizationParameters> shared_ptr;
typedef NonlinearOptimizationParameters This;
double absDecrease_; ///< threshold for the absolute decrease per iteration
/**
* Relative decrease threshold, where relative error = (new-current)/current.
* This can be set to 0 if there is a possibility for negative error values.
*/
double relDecrease_; ///< threshold for the relative decrease per iteration
double sumError_; ///< threshold for the sum of error
size_t maxIterations_; ///< maximum number of iterations
double lambda_; ///< starting lambda value
double lambdaFactor_; ///< factor by which lambda is multiplied
/// verbosity levels
typedef enum {
SILENT,
ERROR,
LAMBDA,
TRYLAMBDA,
VALUES,
DELTA,
TRYCONFIG,
TRYDELTA,
LINEAR,
DAMPED
} verbosityLevel;
verbosityLevel verbosity_; ///< verbosity
/// trust-region method mode
typedef enum {
FAST, BOUNDED, CAUTIOUS
} LambdaMode;
LambdaMode lambdaMode_; ///<trust-region method mode
/// if true, solve whole system with QR, otherwise use LDL when possible
bool useQR_;
/// @name Standard Constructors
/// @{
/// Default constructor
NonlinearOptimizationParameters();
/// Constructor from doubles
NonlinearOptimizationParameters(double absDecrease, double relDecrease,
double sumError, int iIters = 100, double lambda = 1e-5,
double lambdaFactor = 10, verbosityLevel v = SILENT,
LambdaMode lambdaMode = BOUNDED, bool useQR = false);
/// Copy constructor
NonlinearOptimizationParameters(
const NonlinearOptimizationParameters &parameters);
/// @}
/// @name Standard Interface
/// @{
/// print
void print(const std::string& s="") const;
/// a copy of old instance except new lambda
shared_ptr newLambda_(double lambda) const;
/// @}
/// @name Advanced Interface
/// @{
/// a copy of old instance except new verbosity
static shared_ptr newVerbosity(verbosityLevel verbosity);
/// a copy of old instance except new maxIterations
static shared_ptr newMaxIterations(int maxIterations);
/// a copy of old instance except new lambda
static shared_ptr newLambda(double lambda);
/// a copy of old instance except new thresholds
static shared_ptr newDecreaseThresholds(double absDecrease,
double relDecrease);
/// a copy of old instance except new QR flag
static shared_ptr newFactorization(bool useQR);
/// @}
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int version)
{
ar & BOOST_SERIALIZATION_NVP(absDecrease_);
ar & BOOST_SERIALIZATION_NVP(relDecrease_);
ar & BOOST_SERIALIZATION_NVP(sumError_);
ar & BOOST_SERIALIZATION_NVP(maxIterations_);
ar & BOOST_SERIALIZATION_NVP(lambda_);
ar & BOOST_SERIALIZATION_NVP(lambdaFactor_);
ar & BOOST_SERIALIZATION_NVP(verbosity_);
ar & BOOST_SERIALIZATION_NVP(lambdaMode_);
ar & BOOST_SERIALIZATION_NVP(useQR_);
}
};
}
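
For illustration, a hedged sketch of filling this structure through the constructor declared above; the threshold values are arbitrary examples, not recommendations:

// Sketch only: tighter decrease thresholds and at most 200 iterations;
// the remaining arguments (lambda, verbosity, ...) keep their defaults.
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

void constructorSketch() {
  gtsam::NonlinearOptimizationParameters params(1e-8, 1e-8, 0.0, 200);
  params.print("custom parameters");
}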

View File

@ -24,22 +24,50 @@ using namespace std;
namespace gtsam {

/* ************************************************************************* */
bool check_convergence (
    const NonlinearOptimizationParameters &parameters,
    double currentError, double newError) {
  return check_convergence(parameters.relDecrease_,
      parameters.absDecrease_,
      parameters.sumError_,
      currentError, newError,
      parameters.verbosity_) ;
}

bool check_convergence(
    double relativeErrorTreshold,
    double absoluteErrorTreshold,
    double errorThreshold,
    double currentError, double newError, int verbosity) {

NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const {

  double currentError = this->error();

  // check if we're already close enough
  if(currentError <= params_->errorTol) {
    if (params_->verbosity >= NonlinearOptimizerParams::ERROR)
      cout << "Exiting, as error = " << currentError << " < " << params_->errorTol << endl;
    return this->clone();
  }

  // Return if we already have too many iterations
  if(this->iterations() >= params_->maxIterations)
    return this->clone();

  // Iterative loop
  auto_ptr next = this->iterate(); // First iteration happens here
  while(next->iterations() < params_->maxIterations &&
      !checkConvergence(params_->relativeErrorTol, params_->absoluteErrorTol,
          params_->errorTol, currentError, next->error(), params_->verbosity)) {
    // Do next iteration
    currentError = next->error();
    next = next->iterate();
  }

  // Printing if verbose
  if (params_->verbosity >= NonlinearOptimizerParams::VALUES)
    next->values()->print("final values");
  if (params_->verbosity >= NonlinearOptimizerParams::ERROR &&
      next->iterations() >= params_->maxIterations)
    cout << "Terminating because reached maximum iterations" << endl;
  if (params_->verbosity >= NonlinearOptimizerParams::ERROR)
    cout << "final error: " << next->error() << endl;

  // Return optimizer from final iteration
  return next;
}

/* ************************************************************************* */
bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold,
    double errorThreshold, double currentError, double newError,
    NonlinearOptimizerParams::Verbosity verbosity) {
if ( verbosity >= 2 ) {
if ( newError <= errorThreshold ) if ( newError <= errorThreshold )
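
The rest of the checkConvergence body is cut off by this hunk; in outline, the loop in defaultOptimize() stops as soon as any one of the three thresholds is met. A hedged sketch of that test, not the verbatim body:

// Sketch only: the shape of the convergence test used by defaultOptimize() above.
bool convergedSketch(double relativeErrorTreshold, double absoluteErrorTreshold,
    double errorThreshold, double currentError, double newError) {
  if (newError <= errorThreshold) return true;               // total error small enough
  double absoluteDecrease = currentError - newError;         // absolute improvement
  double relativeDecrease = absoluteDecrease / currentError; // relative improvement
  return absoluteDecrease <= absoluteErrorTreshold
      || relativeDecrease <= relativeErrorTreshold;
}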

View File

@ -18,442 +18,201 @@
#pragma once

#include <gtsam/linear/GaussianMultifrontalSolver.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

namespace gtsam {
/** The common parameters for Nonlinear optimizers.  Most optimizers
 * deriving from NonlinearOptimizer also subclass the parameters.
 */
class NonlinearOptimizerParams {
public:
  /** Control the printing verbosity */
  enum Verbosity {
    SILENT,
    ERROR,
    LAMBDA,
    TRYLAMBDA,
    VALUES,
    DELTA,
    TRYCONFIG,
    TRYDELTA,
    LINEAR,
    DAMPED
  };

  int maxIterations; ///< The maximum iterations to stop iterating (default 100)
  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 0.0)
  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)

  NonlinearOptimizerParams() :
    maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(0.0),
    errorTol(0.0), verbosity(SILENT) {}

  void print(const std::string& str = "") const {
    cout << str << "\n";
    cout << "relative decrease threshold: " << relativeErrorTol << "\n";
    cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
    cout << "      total error threshold: " << errorTol << "\n";
    cout << "         maximum iterations: " << maxIterations << "\n";
    cout << "            verbosity level: " << verbosity << "\n";
  }
};

class NullOptimizerWriter {
public:
  NullOptimizerWriter(double error) {} ///Constructor
  virtual ~NullOptimizerWriter() {}
  virtual void write(double error) {} ///Capturing the values of the parameters after the optimization
};

/**
 * The class NonlinearOptimizer encapsulates an optimization state.
 * Typically it is instantiated with a NonlinearFactorGraph and initial values
 * and then one of the optimization routines is called. These iterate
 * until convergence. All methods are functional and return a new state.
 *
 * The class is parameterized by the Graph type $G$, Values class type $Values$,
 * linear system class $L$, the non linear solver type $S$, and the writer type $W$
 *
 * The values class type $Values$ is in order to be able to optimize over non-vector values structures.
 *
 * A nonlinear system solver $S$
 * Concept NonLinearSolver<G,Values,L> implements
 *   linearize: G * Values -> L
 *   solve : L -> Values
 *
 * The writer $W$ generates output to disk or the screen.
 *
 * For example, in a 2D case, $G$ can be pose2SLAM::Graph, $Values$ can be Pose2Values,
 * $L$ can be GaussianFactorGraph and $S$ can be Factorization<pose2SLAM::Graph, Pose2Values>.
 * The solver class has two main functions: linearize and optimize. The first one
 * linearizes the nonlinear cost function around the current estimate, and the second
 * one optimizes the linearized system using various methods.
 *
 * To use the optimizer in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
 * \nosubgrouping
 */
template<class G, class L = GaussianFactorGraph, class GS = GaussianMultifrontalSolver, class W = NullOptimizerWriter>
class NonlinearOptimizer {
public:
  // For performance reasons in recursion, we store values in a shared_ptr
  typedef boost::shared_ptr<const Values> shared_values; ///Prevent memory leaks in Values
  typedef boost::shared_ptr<const G> shared_graph; /// Prevent memory leaks in Graph
  typedef boost::shared_ptr<L> shared_linear; /// Not sure
  typedef boost::shared_ptr<const Ordering> shared_ordering; ///ordering parameters
  typedef boost::shared_ptr<GS> shared_solver; /// Solver
  typedef NonlinearOptimizationParameters Parameters; ///These take the parameters defined in NonLinearOptimizationParameters.h
  typedef boost::shared_ptr<const Parameters> shared_parameters ;
  typedef boost::shared_ptr<VariableIndex> shared_structure; // TODO: make this const

private:

  typedef NonlinearOptimizer<G, L, GS> This;
  typedef boost::shared_ptr<const std::vector<size_t> > shared_dimensions;

  /// keep a reference to const version of the graph
  /// These normally do not change
  const shared_graph graph_;

  // keep a values structure and its error
  // These typically change once per iteration (in a functional way)
  shared_values values_;

  // current error for this state
  double error_;

  // the variable ordering
  const shared_ordering ordering_;

  // storage for parameters, lambda, thresholds, etc.
  shared_parameters parameters_;
// for performance track
size_t iterations_;
// The dimensions of each linearized variable
const shared_dimensions dimensions_;
// storage of structural components that don't change between iterations
// used at creation of solvers in each iteration
// TODO: make this structure component specific to solver type
const shared_structure structure_;
// solver used only for SPCG
// FIXME: remove this!
shared_solver spcg_solver_;
/// @name Advanced Constructors
/// @{
/**
* Constructor that does not do any computation
*/
NonlinearOptimizer(shared_graph graph, shared_values values, const double error,
shared_ordering ordering, shared_parameters parameters, shared_dimensions dimensions,
size_t iterations, shared_structure structure)
: graph_(graph), values_(values), error_(error), ordering_(ordering),
parameters_(parameters), iterations_(iterations), dimensions_(dimensions),
structure_(structure) {}
/** constructors to replace specific components */
///TODO: comment
This newValues_(shared_values newValues) const {
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues),
ordering_, parameters_, dimensions_, iterations_, structure_); }
///TODO: comment
This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const {
return NonlinearOptimizer(graph_, newValues, newError, ordering_,
parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); }
///TODO: comment
This newIterations_(int iterations) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters_, dimensions_,
iterations, structure_); }
///TODO: comment
This newLambda_(double newLambda) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_,
parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); }
///TODO: comment
This newValuesLambda_(shared_values newValues, double newLambda) const {
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues),
ordering_, parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); }
///TODO: comment
This newParameters_(shared_parameters parameters) const {
return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters, dimensions_,
iterations_, structure_); }
/// @}
public:
/// @name Standard Constructors
/// @{
/**
* Constructor that evaluates new error
*/
NonlinearOptimizer(shared_graph graph,
shared_values values,
shared_ordering ordering,
shared_parameters parameters = boost::make_shared<Parameters>());
/**
* Constructor that takes a solver directly - only useful for SPCG
* FIXME: REMOVE THIS FUNCTION!
*/
NonlinearOptimizer(shared_graph graph,
shared_values values,
shared_ordering ordering,
shared_solver spcg_solver,
shared_parameters parameters = boost::make_shared<Parameters>());
/**
* Copy constructor
*/
NonlinearOptimizer(const NonlinearOptimizer<G, L, GS> &optimizer) :
graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
ordering_(optimizer.ordering_), parameters_(optimizer.parameters_),
iterations_(optimizer.iterations_), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {}
// access to member variables
/// @}
/// @name Standard Interface
/// @{
/// print
void print(const std::string& s="") const {
cout << "NonlinearOptimizer " << s << endl;
cout << " current error: " << error_ << endl;
cout << "iterations count: " << iterations_ << endl;
}
/** Return current error */
double error() const { return error_; }
/** Return current lambda */
double lambda() const { return parameters_->lambda_; }
/** Return the values */
shared_values values() const { return values_; }
/** Return the graph */
shared_graph graph() const { return graph_; }
/** Return the iterations */
size_t iterations() const { return iterations_; }
/** Return the ordering */
shared_ordering ordering() const { return ordering_; }
/** Return the parameters */
shared_parameters parameters() const { return parameters_; }
/** Return the structure variable (variable index) */
shared_structure structure() const { return structure_; }
/**
* Return a linearized graph at the current graph/values/ordering
*/
shared_linear linearize() const {
return shared_linear(new L(*graph_->linearize(*values_, *ordering_)));
}
/**
* create a solver around the current graph/values
* NOTE: this will actually solve a linear system
*/
shared_solver createSolver() const {
return shared_solver(new GS(linearize(), structure_, parameters_->useQR_));
}
/**
* Return mean and covariance on a single variable
*/
Matrix marginalCovariance(Key j) const {
return createSolver()->marginalCovariance((*ordering_)[j]);
}
/**
* linearize and optimize
* This returns an VectorValues, i.e., vectors in tangent space of Values
*/
VectorValues linearizeAndOptimizeForDelta() const {
return *createSolver()->optimize();
}
/**
* Do one Gauss-Newton iteration and return next state
* suggested interface
*/
NonlinearOptimizer iterate() const;
///
///Optimize a solution for a nonlinear factor graph
///@param relativeTreshold
///@param absoluteTreshold
///@param verbosity Integer specifying how much output to provide
///
// suggested interface
NonlinearOptimizer gaussNewton() const;
/// @}
/// @name Advanced Interface
/// @{
/** Recursively try to do tempered Gauss-Newton steps until we succeed */
NonlinearOptimizer try_lambda(const L& linear);
/**
* One iteration of Levenberg Marquardt
*/
NonlinearOptimizer iterateLM();
///
///Optimize using Levenberg-Marquardt. Really Levenberg's
///algorithm at this moment, as we just add I*\lambda to Hessian
///H'H. The probabilistic explanation is very simple: every
///variable gets an extra Gaussian prior that biases staying at
///current value, with variance 1/lambda. This is done very easily
///(but perhaps wastefully) by adding a prior factor for each of
///the variables, after linearization.
///
///@param relativeThreshold
///@param absoluteThreshold
///@param verbosity Integer specifying how much output to provide
///@param lambdaFactor Factor by which to decrease/increase lambda
///
NonlinearOptimizer levenbergMarquardt();
/**
* One iteration of the dog leg algorithm
*/
NonlinearOptimizer iterateDogLeg();
/**
* Optimize using the Dog Leg algorithm
*/
NonlinearOptimizer dogLeg();
// static interfaces to LM, Dog leg, and GN optimization techniques
///Static interface to LM optimization using default ordering
///@param graph Nonlinear factor graph to optimize
///@param values Initial values
///@param parameters Optimization parameters
///@return an optimized values structure
static shared_values optimizeLM(shared_graph graph,
shared_values values,
shared_parameters parameters = boost::make_shared<Parameters>()) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
// initial optimization state is the same in both cases tested
//GS solver(*graph->linearize(*values, *ordering));
NonlinearOptimizer optimizer(graph, values, ordering, parameters);
NonlinearOptimizer result = optimizer.levenbergMarquardt();
return result.values();
}
///TODO: comment
static shared_values optimizeLM(shared_graph graph,
shared_values values,
Parameters::verbosityLevel verbosity) {
return optimizeLM(graph, values, Parameters::newVerbosity(verbosity));
}
/**
* Static interface to LM optimization (no shared_ptr arguments) - see above
*/
static shared_values optimizeLM(const G& graph,
const Values& values,
const Parameters parameters = Parameters()) {
return optimizeLM(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(values),
boost::make_shared<Parameters>(parameters));
}
///TODO: comment
static shared_values optimizeLM(const G& graph,
const Values& values,
Parameters::verbosityLevel verbosity) {
return optimizeLM(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(values),
verbosity);
}
///Static interface to Dog leg optimization using default ordering
///@param graph Nonlinear factor graph to optimize
///@param values Initial values
///@param parameters Optimization parameters
///@return an optimized values structure
static shared_values optimizeDogLeg(shared_graph graph,
shared_values values,
shared_parameters parameters = boost::make_shared<Parameters>()) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
// initial optimization state is the same in both cases tested
//GS solver(*graph->linearize(*values, *ordering));
NonlinearOptimizer optimizer(graph, values, ordering, parameters);
NonlinearOptimizer result = optimizer.dogLeg();
return result.values();
}
///
///Static interface to Dog leg optimization using default ordering and thresholds
///@param graph Nonlinear factor graph to optimize
///@param values Initial values
///@param verbosity Integer specifying how much output to provide
///@return an optimized values structure
///
static shared_values optimizeDogLeg(shared_graph graph,
shared_values values,
Parameters::verbosityLevel verbosity) {
return optimizeDogLeg(graph, values, Parameters::newVerbosity(verbosity)->newLambda_(1.0));
}
/**
* Static interface to Dogleg optimization (no shared_ptr arguments) - see above
*/
static shared_values optimizeDogLeg(const G& graph,
const Values& values,
const Parameters parameters = Parameters()) {
return optimizeDogLeg(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(values),
boost::make_shared<Parameters>(parameters));
}
///TODO: comment
static shared_values optimizeDogLeg(const G& graph,
const Values& values,
Parameters::verbosityLevel verbosity) {
return optimizeDogLeg(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(values),
verbosity);
}
///
///Static interface to GN optimization using default ordering and thresholds
///@param graph Nonlinear factor graph to optimize
///@param values Initial values
///@param verbosity Integer specifying how much output to provide
///@return an optimized values structure
///
static shared_values optimizeGN(shared_graph graph,
shared_values values,
shared_parameters parameters = boost::make_shared<Parameters>()) {
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
// initial optimization state is the same in both cases tested
GS solver(*graph->linearize(*values, *ordering));
NonlinearOptimizer optimizer(graph, values, ordering, parameters);
NonlinearOptimizer result = optimizer.gaussNewton();
return result.values();
}
/**
* Static interface to GN optimization (no shared_ptr arguments) - see above
*/
static shared_values optimizeGN(const G& graph, const Values& values, const Parameters parameters = Parameters()) {
return optimizeGN(boost::make_shared<const G>(graph),
boost::make_shared<const Values>(values),
boost::make_shared<Parameters>(parameters));
}
};
/**
 * Check convergence
 */
bool check_convergence (
    double relativeErrorTreshold,
    double absoluteErrorTreshold,
    double errorThreshold,
    double currentError, double newError, int verbosity);

///TODO: comment
bool check_convergence (
    const NonlinearOptimizationParameters &parameters,
    double currentError, double newError);

} // gtsam

#include <gtsam/nonlinear/NonlinearOptimizer-inl.h>

/**
 * This is the abstract interface for classes that can optimize for the
 * maximum-likelihood estimate of a NonlinearFactorGraph.
 *
 * To use a class derived from this interface, construct the class with a
 * NonlinearFactorGraph and an initial variable assignment. Next, call the
 * optimize() method, which returns a new NonlinearOptimizer object containing
 * the optimized variable assignment. Call the values() method to retrieve the
 * optimized estimate.
 *
 * Note: This class is immutable, optimize() and iterate() return new
 * NonlinearOptimizer objects, so be sure to use the returned object and not
 * simply keep the unchanged original.
 *
 * Example:
 * \code
NonlinearOptimizer::shared_ptr optimizer = DoglegOptimizer::Create(graph, initialValues);
optimizer = optimizer->optimize();
Values result = optimizer->values();
useTheResult(result);
\endcode
 *
 * Equivalent, but more compact:
 * \code
useTheResult(DoglegOptimizer(graph, initialValues).optimize()->values());
\endcode
 *
 * This interface also exposes an iterate() method, which performs one
 * iteration, returning a NonlinearOptimizer containing the adjusted variable
 * assignment. The optimize() method simply calls iterate() multiple times,
 * until the error changes less than a threshold. We expose iterate() so that
 * you can easily control what happens between iterations, such as drawing or
 * printing, moving points from behind the camera to in front, etc.
 *
 * To modify the graph, values, or parameters between iterations, call the
 * update() functions, which preserve all other state (for example, the trust
 * region size in DoglegOptimizer). Derived optimizer classes also have
 * additional update methods, not in this abstract interface, for updating
 * algorithm-specific state.
 *
 * For more flexibility, since all functions are virtual, you may override them
 * in your own derived class.
 */
class NonlinearOptimizer {

public:

/** An auto pointer to this class */
typedef std::auto_ptr<NonlinearOptimizer> auto_ptr;
/** A const shared_ptr to a NonlinearFactorGraph */
typedef boost::shared_ptr<const NonlinearFactorGraph> SharedGraph;
/** A const shared_ptr to the Values */
typedef boost::shared_ptr<const Values> SharedValues;
/** A const shared_ptr to the parameters */
typedef boost::shared_ptr<const NonlinearOptimizerParams> SharedParams;
protected:
const SharedGraph graph_;
const SharedValues values_;
const SharedParams params_;
const double error_;
const int iterations_;
NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values,
const SharedParams& params) :
graph_(graph), values_(values), params_(params),
error_(graph_->error(*values_)), iterations_(0) {}
NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values,
const SharedParams& params, double error, int iterations) :
graph_(graph), values_(values), params_(params),
error_(error), iterations_(iterations) {}
public:
/// @name Standard interface
/// @{
/** Optimize for the maximum-likelihood estimate, returning a new
* NonlinearOptimizer class containing the optimized variable assignments,
* which may be retrieved with values().
*/
virtual auto_ptr optimize() const { return defaultOptimize(); }
/** Retrieve the current variable assignment estimate. */
virtual const SharedValues& values() const { return values_; }
/** Retrieve the parameters. */
virtual const SharedParams& params() const { return params_; }
/** Return the current factor graph error */
virtual double error() const { return error_; }
/** Return the number of iterations that have been performed */
virtual int iterations() const { return iterations_; }
/// @}

/// @name Advanced interface
/// @{
/** Perform a single iteration, returning a new NonlinearOptimizer class
* containing the updated variable assignments, which may be retrieved with
* values().
*/
virtual auto_ptr iterate() const = 0;
/** Update the nonlinear factor graph, leaving all other state the same.
* Returns a new updated NonlinearOptimizer object, the original is not
* modified.
*/
virtual auto_ptr update(const SharedGraph& newGraph) const = 0;
/** Update the variable assignments, leaving all other state the same.
* Returns a new updated NonlinearOptimizer object, the original is not
* modified.
*/
virtual auto_ptr update(const SharedValues& newValues) const = 0;
/** Update the parameters, leaving all other state the same.
* Returns a new updated NonlinearOptimizer object, the original is not
* modified.
*/
virtual auto_ptr update(const SharedParams& newParams) const = 0;
/** Create a copy of the NonlinearOptimizer */
virtual auto_ptr clone() const = 0;
/// @}
protected:
/** A default implementation of the optimization loop, which calls iterate()
* until checkConvergence returns true.
*/
auto_ptr defaultOptimize() const;
};
/** Check whether the relative error decrease is less than relativeErrorTreshold,
* the absolute error decrease is less than absoluteErrorTreshold, <em>or</em>
* the error itself is less than errorThreshold.
*/
bool checkConvergence(double relativeErrorTreshold,
double absoluteErrorTreshold, double errorThreshold,
double currentError, double newError, NonlinearOptimizerParams::Verbosity verbosity);
} // gtsam
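
To make the contract above concrete, a hedged sketch of the minimal surface a subclass must provide; TrivialOptimizer is purely illustrative, is not part of this commit, and performs no real update:

// Sketch only: TrivialOptimizer is an assumed example, not GTSAM code.
// A real optimizer would relinearize and solve in iterate(); here we only
// advance the iteration counter via the five-argument protected constructor.
#include <gtsam/nonlinear/NonlinearOptimizer.h>

class TrivialOptimizer : public gtsam::NonlinearOptimizer {
public:
  TrivialOptimizer(const SharedGraph& graph, const SharedValues& values,
      const SharedParams& params) :
    NonlinearOptimizer(graph, values, params) {}

  TrivialOptimizer(const SharedGraph& graph, const SharedValues& values,
      const SharedParams& params, double error, int iterations) :
    NonlinearOptimizer(graph, values, params, error, iterations) {}

  virtual auto_ptr iterate() const {
    // No actual update of the estimate; just count the iteration.
    return auto_ptr(new TrivialOptimizer(graph_, values_, params_,
        error_, iterations_ + 1));
  }
  virtual auto_ptr update(const SharedGraph& newGraph) const {
    return auto_ptr(new TrivialOptimizer(newGraph, values_, params_));
  }
  virtual auto_ptr update(const SharedValues& newValues) const {
    return auto_ptr(new TrivialOptimizer(graph_, newValues, params_));
  }
  virtual auto_ptr update(const SharedParams& newParams) const {
    return auto_ptr(new TrivialOptimizer(graph_, values_, newParams));
  }
  virtual auto_ptr clone() const {
    return auto_ptr(new TrivialOptimizer(*this));
  }
};

With such a subclass, the pattern in the class comment applies directly: construct it with a graph and initial values, call optimize(), and read values() and error() from the returned object.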