From 0309b6b1849443f318b29e2245e0f8b5f41d402d Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sun, 26 Feb 2012 02:44:58 +0000 Subject: [PATCH] (in branch) NonlinearOptimizer base class --- gtsam/nonlinear/NonlinearOptimization-inl.h | 134 ---- gtsam/nonlinear/NonlinearOptimization.h | 55 -- .../NonlinearOptimizationParameters.cpp | 121 ---- .../NonlinearOptimizationParameters.h | 138 ---- gtsam/nonlinear/NonlinearOptimizer.cpp | 54 +- gtsam/nonlinear/NonlinearOptimizer.h | 603 ++++++------------ 6 files changed, 222 insertions(+), 883 deletions(-) delete mode 100644 gtsam/nonlinear/NonlinearOptimization-inl.h delete mode 100644 gtsam/nonlinear/NonlinearOptimization.h delete mode 100644 gtsam/nonlinear/NonlinearOptimizationParameters.cpp delete mode 100644 gtsam/nonlinear/NonlinearOptimizationParameters.h diff --git a/gtsam/nonlinear/NonlinearOptimization-inl.h b/gtsam/nonlinear/NonlinearOptimization-inl.h deleted file mode 100644 index 361de59e2..000000000 --- a/gtsam/nonlinear/NonlinearOptimization-inl.h +++ /dev/null @@ -1,134 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimization-inl.h - * @date Oct 17, 2010 - * @author Kai Ni - * @brief Easy interfaces for NonlinearOptimizer - */ - -#pragma once - -#include -#include - -#if ENABLE_SPCG -#include -#endif - -#include - -using namespace std; - -namespace gtsam { - - /** - * The Elimination solver - */ - template - Values optimizeSequential(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters, bool useLM) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - - // Create an nonlinear Optimizer that uses a Sequential Solver - typedef NonlinearOptimizer Optimizer; - Optimizer optimizer(boost::make_shared(graph), - boost::make_shared(initialEstimate), ordering, - boost::make_shared(parameters)); - - // Now optimize using either LM or GN methods. 
- if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } - - /** - * The multifrontal solver - */ - template - Values optimizeMultiFrontal(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters, bool useLM) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - - // Create an nonlinear Optimizer that uses a Multifrontal Solver - typedef NonlinearOptimizer Optimizer; - Optimizer optimizer(boost::make_shared(graph), - boost::make_shared(initialEstimate), ordering, - boost::make_shared(parameters)); - - // now optimize using either LM or GN methods - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } - -#if ENABLE_SPCG - /** - * The sparse preconditioned conjugate gradient solver - */ - template - Values optimizeSPCG(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(), - bool useLM = true) { - - // initial optimization state is the same in both cases tested - typedef SubgraphSolver Solver; - typedef boost::shared_ptr shared_Solver; - typedef NonlinearOptimizer SPCGOptimizer; - shared_Solver solver = boost::make_shared( - graph, initialEstimate, IterativeOptimizationParameters()); - SPCGOptimizer optimizer( - boost::make_shared(graph), - boost::make_shared(initialEstimate), - solver->ordering(), - solver, - boost::make_shared(parameters)); - - // choose nonlinear optimization method - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } -#endif - - /** - * optimization that returns the values - */ - template - Values optimize(const G& graph, const Values& initialEstimate, const NonlinearOptimizationParameters& parameters, - const LinearSolver& solver, - const NonlinearOptimizationMethod& nonlinear_method) { - switch (solver) { - case SEQUENTIAL: - return optimizeSequential(graph, initialEstimate, parameters, - nonlinear_method == LM); - case MULTIFRONTAL: - return optimizeMultiFrontal(graph, initialEstimate, parameters, - nonlinear_method == LM); -#if ENABLE_SPCG - case SPCG: -// return optimizeSPCG(graph, initialEstimate, parameters, -// nonlinear_method == LM); - throw runtime_error("optimize: SPCG not supported yet due to the specific pose constraint"); -#endif - } - throw runtime_error("optimize: undefined solver"); - } - -} //namespace gtsam diff --git a/gtsam/nonlinear/NonlinearOptimization.h b/gtsam/nonlinear/NonlinearOptimization.h deleted file mode 100644 index 6521ded8e..000000000 --- a/gtsam/nonlinear/NonlinearOptimization.h +++ /dev/null @@ -1,55 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimization.h - * @date Oct 14, 2010 - * @author Kai Ni - * @brief Easy interfaces for NonlinearOptimizer - */ - -#pragma once - -#include - -namespace gtsam { - - /** - * all the nonlinear optimization methods - */ - typedef enum { - LM, // Levenberg Marquardt - GN // Gaussian-Newton - } NonlinearOptimizationMethod; - - /** - * all the linear solver types - */ - typedef enum { - SEQUENTIAL, // Sequential elimination - MULTIFRONTAL, // Multi-frontal elimination -#if ENABLE_SPCG - SPCG, // Subgraph Preconditioned Conjugate Gradient -#endif - } LinearSolver; - - /** - * optimization that returns the values - */ - template - Values optimize(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(), - const LinearSolver& solver = MULTIFRONTAL, - const NonlinearOptimizationMethod& nonlinear_method = LM); - -} - -#include diff --git a/gtsam/nonlinear/NonlinearOptimizationParameters.cpp b/gtsam/nonlinear/NonlinearOptimizationParameters.cpp deleted file mode 100644 index e80c3b59c..000000000 --- a/gtsam/nonlinear/NonlinearOptimizationParameters.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizationParameters.cpp - * @date Jan 28, 2012 - * @author Alex Cunningham - * @brief Implements parameters structure - */ - -#include - -#include - -#include - -namespace gtsam { - -using namespace std; - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters() : - absDecrease_(1e-6), relDecrease_(1e-6), sumError_(0.0), maxIterations_( - 100), lambda_(1e-5), lambdaFactor_(10.0), verbosity_(SILENT), lambdaMode_( - BOUNDED), useQR_(false) { -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters(double absDecrease, double relDecrease, - double sumError, int iIters, double lambda, - double lambdaFactor, verbosityLevel v, - LambdaMode lambdaMode, bool useQR) : - absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_( - sumError), maxIterations_(iIters), lambda_(lambda), lambdaFactor_( - lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode), useQR_(useQR) { -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters( - const NonlinearOptimizationParameters ¶meters) : - absDecrease_(parameters.absDecrease_), relDecrease_( - parameters.relDecrease_), sumError_(parameters.sumError_), maxIterations_( - parameters.maxIterations_), lambda_(parameters.lambda_), lambdaFactor_( - parameters.lambdaFactor_), verbosity_(parameters.verbosity_), lambdaMode_( - parameters.lambdaMode_), useQR_(parameters.useQR_) { -} - -/* ************************************************************************* */ -void NonlinearOptimizationParameters::print(const std::string& s) 
const { - cout << "NonlinearOptimizationParameters " << s << endl; - cout << "absolute decrease threshold: " << absDecrease_ << endl; - cout << "relative decrease threshold: " << relDecrease_ << endl; - cout << " error sum threshold: " << sumError_ << endl; - cout << " maximum nr. of iterations: " << maxIterations_ << endl; - cout << " initial lambda value: " << lambda_ << endl; - cout << " factor to multiply lambda: " << lambdaFactor_ << endl; - cout << " verbosity level: " << verbosity_ << endl; - cout << " lambda mode: " << lambdaMode_ << endl; - cout << " use QR: " << useQR_ << endl; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newLambda_(double lambda) const { - shared_ptr ptr( - boost::make_shared < NonlinearOptimizationParameters > (*this)); - ptr->lambda_ = lambda; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newVerbosity(verbosityLevel verbosity) { - shared_ptr ptr(boost::make_shared()); - ptr->verbosity_ = verbosity; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newMaxIterations(int maxIterations) { - shared_ptr ptr(boost::make_shared()); - ptr->maxIterations_ = maxIterations; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newLambda(double lambda) { - shared_ptr ptr(boost::make_shared()); - ptr->lambda_ = lambda; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newDecreaseThresholds(double absDecrease, - double relDecrease) { - shared_ptr ptr(boost::make_shared()); - ptr->absDecrease_ = absDecrease; - ptr->relDecrease_ = relDecrease; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newFactorization(bool useQR) { - shared_ptr ptr(boost::make_shared()); - ptr->useQR_ = useQR; - return ptr; -} - -} // \namespace gtsam diff --git a/gtsam/nonlinear/NonlinearOptimizationParameters.h b/gtsam/nonlinear/NonlinearOptimizationParameters.h deleted file mode 100644 index 07e01ea58..000000000 --- a/gtsam/nonlinear/NonlinearOptimizationParameters.h +++ /dev/null @@ -1,138 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizationParameters.h - * @date Oct 14, 2010 - * @author Kai Ni - * @brief structure to store parameters for nonlinear optimization - */ - -#pragma once - -#include -#include -#include - -namespace gtsam { - - /** - * a container for all related parameters - * \nosubgrouping - */ - struct NonlinearOptimizationParameters { - - typedef boost::shared_ptr shared_ptr; - typedef NonlinearOptimizationParameters This; - - double absDecrease_; ///< threshold for the absolute decrease per iteration - - /** - * Relative decrease threshold, where relative error = (new-current)/current. - * This can be set to 0 if there is a possibility for negative error values. - */ - double relDecrease_; ///< threshold for the relative decrease per iteration - - double sumError_; ///< threshold for the sum of error - size_t maxIterations_; ///< maximum number of iterations - double lambda_; ///< starting lambda value - double lambdaFactor_; ///< factor by which lambda is multiplied - - /// verbosity levels - typedef enum { - SILENT, - ERROR, - LAMBDA, - TRYLAMBDA, - VALUES, - DELTA, - TRYCONFIG, - TRYDELTA, - LINEAR, - DAMPED - } verbosityLevel; - verbosityLevel verbosity_; ///< verbosity - - /// trust-region method mode - typedef enum { - FAST, BOUNDED, CAUTIOUS - } LambdaMode; - LambdaMode lambdaMode_; /// - void serialize(ARCHIVE & ar, const unsigned int version) - { - ar & BOOST_SERIALIZATION_NVP(absDecrease_); - ar & BOOST_SERIALIZATION_NVP(relDecrease_); - ar & BOOST_SERIALIZATION_NVP(sumError_); - ar & BOOST_SERIALIZATION_NVP(maxIterations_); - ar & BOOST_SERIALIZATION_NVP(lambda_); - ar & BOOST_SERIALIZATION_NVP(lambdaFactor_); - ar & BOOST_SERIALIZATION_NVP(verbosity_); - ar & BOOST_SERIALIZATION_NVP(lambdaMode_); - ar & BOOST_SERIALIZATION_NVP(useQR_); - } - }; -} diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 4d7d76bfc..e0c10bfe9 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -24,22 +24,50 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ +auto_ptr NonlinearOptimizer::defaultOptimize() const { -bool check_convergence ( - const NonlinearOptimizationParameters ¶meters, - double currentError, double newError) { - return check_convergence(parameters.relDecrease_, - parameters.absDecrease_, - parameters.sumError_, - currentError, newError, - parameters.verbosity_) ; + double currentError = this->error(); + + // check if we're already close enough + if(currentError <= params_->errorTol) { + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) + cout << "Exiting, as error = " << currentError << " < " << errorTol << endl; + return this->clone(); + } + + // Return if we already have too many iterations + if(this->iterations() >= params_->maxIterations) + return this->clone(); + + // Iterative loop + auto_ptr next = this->iterate(); // First iteration happens here + while(next->iterations() < params_->maxIterations && + !checkConvergence(params_->relativeErrorTol, params_->absoluteErrorTol, + params_->errorTol, currentError, next->error(), params_->verbosity)) { + + // Do next iteration + currentError = next->error(); + next = next->iterate(); + } + + // Printing if verbose + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) + 
next->values()->print("final values"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR && + next->iterations() >= params_->maxIterations) + cout << "Terminating because reached maximum iterations" << endl; + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) + cout << "final error: " << next->error() << endl; + + // Return optimizer from final iteration + return next; } -bool check_convergence( - double relativeErrorTreshold, - double absoluteErrorTreshold, - double errorThreshold, - double currentError, double newError, int verbosity) { +/* ************************************************************************* */ +bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold, + double errorThreshold, double currentError, double newError, + NonlinearOptimizerParams::Verbosity verbosity) { if ( verbosity >= 2 ) { if ( newError <= errorThreshold ) diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 4e41649e6..7981146d7 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -18,442 +18,201 @@ #pragma once -#include #include #include namespace gtsam { -class NullOptimizerWriter { -public: - NullOptimizerWriter(double error) {} ///Contructor - virtual ~NullOptimizerWriter() {} - virtual void write(double error) {} ///Capturing the values of the parameters after the optimization -}; /// - -/** - * The class NonlinearOptimizer encapsulates an optimization state. - * Typically it is instantiated with a NonlinearFactorGraph and initial values - * and then one of the optimization routines is called. These iterate - * until convergence. All methods are functional and return a new state. - * - * The class is parameterized by the Graph type $G$, Values class type $Values$, - * linear system class $L$, the non linear solver type $S$, and the writer type $W$ - * - * The values class type $Values$ is in order to be able to optimize over non-vector values structures. - * - * A nonlinear system solver $S$ - * Concept NonLinearSolver implements - * linearize: G * Values -> L - * solve : L -> Values - * - * The writer $W$ generates output to disk or the screen. - * - * For example, in a 2D case, $G$ can be pose2SLAM::Graph, $Values$ can be Pose2Values, - * $L$ can be GaussianFactorGraph and $S$ can be Factorization. - * The solver class has two main functions: linearize and optimize. The first one - * linearizes the nonlinear cost function around the current estimate, and the second - * one optimizes the linearized system using various methods. - * - * To use the optimizer in code, include in your cpp file - * \nosubgrouping +/** The common parameters for Nonlinear optimizers. Most optimizers + * deriving from NonlinearOptimizer also subclass the parameters. 
*/ -template -class NonlinearOptimizer { +class NonlinearOptimizerParams { public: - // For performance reasons in recursion, we store values in a shared_ptr - typedef boost::shared_ptr shared_values; ///Prevent memory leaks in Values - typedef boost::shared_ptr shared_graph; /// Prevent memory leaks in Graph - typedef boost::shared_ptr shared_linear; /// Not sure - typedef boost::shared_ptr shared_ordering; ///ordering parameters - typedef boost::shared_ptr shared_solver; /// Solver - typedef NonlinearOptimizationParameters Parameters; ///These take the parameters defined in NonLinearOptimizationParameters.h - typedef boost::shared_ptr shared_parameters ; /// - typedef boost::shared_ptr shared_structure; // TODO: make this const + /** Control the printing verbosity */ + enum Verbosity { + SILENT, + ERROR, + LAMBDA, + TRYLAMBDA, + VALUES, + DELTA, + TRYCONFIG, + TRYDELTA, + LINEAR, + DAMPED + }; -private: + int maxIterations; ///< The maximum iterations to stop iterating (default 100) + double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) + double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 0.0) + double errorTol; ///< The maximum total error to stop iterating (default 0.0) + Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) - typedef NonlinearOptimizer This; - typedef boost::shared_ptr > shared_dimensions; + NonlinearOptimizerParams() : + maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(0.0), + errorTol(0.0), verbosity(SILENT) {} - /// keep a reference to const version of the graph - /// These normally do not change - const shared_graph graph_; - - // keep a values structure and its error - // These typically change once per iteration (in a functional way) - shared_values values_; - - // current error for this state - double error_; - - // the variable ordering - const shared_ordering ordering_; - - // storage for parameters, lambda, thresholds, etc. - shared_parameters parameters_; - - // for performance track - size_t iterations_; - - // The dimensions of each linearized variable - const shared_dimensions dimensions_; - - // storage of structural components that don't change between iterations - // used at creation of solvers in each iteration - // TODO: make this structure component specific to solver type - const shared_structure structure_; - - // solver used only for SPCG - // FIXME: remove this! 
- shared_solver spcg_solver_; - - /// @name Advanced Constructors - /// @{ - - /** - * Constructor that does not do any computation - */ - NonlinearOptimizer(shared_graph graph, shared_values values, const double error, - shared_ordering ordering, shared_parameters parameters, shared_dimensions dimensions, - size_t iterations, shared_structure structure) - : graph_(graph), values_(values), error_(error), ordering_(ordering), - parameters_(parameters), iterations_(iterations), dimensions_(dimensions), - structure_(structure) {} - - /** constructors to replace specific components */ - - ///TODO: comment - This newValues_(shared_values newValues) const { - return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), - ordering_, parameters_, dimensions_, iterations_, structure_); } - - ///TODO: comment - This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const { - return NonlinearOptimizer(graph_, newValues, newError, ordering_, - parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newIterations_(int iterations) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters_, dimensions_, - iterations, structure_); } - - ///TODO: comment - This newLambda_(double newLambda) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, - parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newValuesLambda_(shared_values newValues, double newLambda) const { - return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), - ordering_, parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newParameters_(shared_parameters parameters) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters, dimensions_, - iterations_, structure_); } - - /// @} - -public: - - /// @name Standard Constructors - /// @{ - - /** - * Constructor that evaluates new error - */ - NonlinearOptimizer(shared_graph graph, - shared_values values, - shared_ordering ordering, - shared_parameters parameters = boost::make_shared()); - - /** - * Constructor that takes a solver directly - only useful for SPCG - * FIXME: REMOVE THIS FUNCTION! 
- */ - NonlinearOptimizer(shared_graph graph, - shared_values values, - shared_ordering ordering, - shared_solver spcg_solver, - shared_parameters parameters = boost::make_shared()); - - /** - * Copy constructor - */ - NonlinearOptimizer(const NonlinearOptimizer &optimizer) : - graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_), - ordering_(optimizer.ordering_), parameters_(optimizer.parameters_), - iterations_(optimizer.iterations_), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {} - - // access to member variables - - /// @} - /// @name Standard Interface - /// @{ - - /// print - void print(const std::string& s="") const { - cout << "NonlinearOptimizer " << s << endl; - cout << " current error: " << error_ << endl; - cout << "iterations count: " << iterations_ << endl; - } - - /** Return current error */ - double error() const { return error_; } - - /** Return current lambda */ - double lambda() const { return parameters_->lambda_; } - - /** Return the values */ - shared_values values() const { return values_; } - - /** Return the graph */ - shared_graph graph() const { return graph_; } - - /** Return the itertions */ - size_t iterations() const { return iterations_; } - - /** Return the ordering */ - shared_ordering ordering() const { return ordering_; } - - /** Return the parameters */ - shared_parameters parameters() const { return parameters_; } - - /** Return the structure variable (variable index) */ - shared_structure structure() const { return structure_; } - - /** - * Return a linearized graph at the current graph/values/ordering - */ - shared_linear linearize() const { - return shared_linear(new L(*graph_->linearize(*values_, *ordering_))); - } - - /** - * create a solver around the current graph/values - * NOTE: this will actually solve a linear system - */ - shared_solver createSolver() const { - return shared_solver(new GS(linearize(), structure_, parameters_->useQR_)); - } - - /** - * Return mean and covariance on a single variable - */ - Matrix marginalCovariance(Key j) const { - return createSolver()->marginalCovariance((*ordering_)[j]); - } - - /** - * linearize and optimize - * This returns an VectorValues, i.e., vectors in tangent space of Values - */ - VectorValues linearizeAndOptimizeForDelta() const { - return *createSolver()->optimize(); - } - - /** - * Do one Gauss-Newton iteration and return next state - * suggested interface - */ - NonlinearOptimizer iterate() const; - - /// - ///Optimize a solution for a non linear factor graph - ///param relativeTreshold - ///@param absoluteTreshold - ///@param verbosity Integer specifying how much output to provide - /// - - // suggested interface - NonlinearOptimizer gaussNewton() const; - - /// @} - /// @name Advanced Interface - /// @{ - - /** Recursively try to do tempered Gauss-Newton steps until we succeed */ - NonlinearOptimizer try_lambda(const L& linear); - - /** - * One iteration of Levenberg Marquardt - */ - NonlinearOptimizer iterateLM(); - - /// - ///Optimize using Levenberg-Marquardt. Really Levenberg's - ///algorithm at this moment, as we just add I*\lambda to Hessian - ///H'H. The probabilistic explanation is very simple: every - ///variable gets an extra Gaussian prior that biases staying at - ///current value, with variance 1/lambda. This is done very easily - ///(but perhaps wastefully) by adding a prior factor for each of - ///the variables, after linearization. 
- /// - ///@param relativeThreshold - ///@param absoluteThreshold - ///@param verbosity Integer specifying how much output to provide - ///@param lambdaFactor Factor by which to decrease/increase lambda - /// - NonlinearOptimizer levenbergMarquardt(); - - /** - * One iteration of the dog leg algorithm - */ - NonlinearOptimizer iterateDogLeg(); - - /** - * Optimize using the Dog Leg algorithm - */ - NonlinearOptimizer dogLeg(); - - // static interfaces to LM, Dog leg, and GN optimization techniques - - ///Static interface to Dog leg optimization using default ordering - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param parameters Optimization parameters - ///@return an optimized values structure - static shared_values optimizeLM(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - //GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.levenbergMarquardt(); - return result.values(); - } - - ///TODO: comment - static shared_values optimizeLM(shared_graph graph, - shared_values values, - Parameters::verbosityLevel verbosity) { - return optimizeLM(graph, values, Parameters::newVerbosity(verbosity)); - } - - /** - * Static interface to LM optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeLM(const G& graph, - const Values& values, - const Parameters parameters = Parameters()) { - return optimizeLM(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } - - ///TODO: comment - static shared_values optimizeLM(const G& graph, - const Values& values, - Parameters::verbosityLevel verbosity) { - return optimizeLM(boost::make_shared(graph), - boost::make_shared(values), - verbosity); - } - - ///Static interface to Dog leg optimization using default ordering - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param parameters Optimization parameters - ///@return an optimized values structure - static shared_values optimizeDogLeg(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - //GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.dogLeg(); - return result.values(); + void print(const std::string& str = "") const { + cout << s << "\n"; + cout << "relative decrease threshold: " << relativeErrorTol << "\n"; + cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; + cout << " total error threshold: " << errorTol << "\n"; + cout << " maximum iterations: " << maxIterations << "\n"; + cout << " verbosity level: " << verbosity << "\n"; } - - /// - ///Static interface to Dog leg optimization using default ordering and thresholds - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param verbosity Integer specifying how much output to provide - ///@return an optimized values structure - /// - static shared_values optimizeDogLeg(shared_graph 
graph, - shared_values values, - Parameters::verbosityLevel verbosity) { - return optimizeDogLeg(graph, values, Parameters::newVerbosity(verbosity)->newLambda_(1.0)); - } - - /** - * Static interface to Dogleg optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeDogLeg(const G& graph, - const Values& values, - const Parameters parameters = Parameters()) { - return optimizeDogLeg(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } - - ///TODO: comment - static shared_values optimizeDogLeg(const G& graph, - const Values& values, - Parameters::verbosityLevel verbosity) { - return optimizeDogLeg(boost::make_shared(graph), - boost::make_shared(values), - verbosity); - } - - /// - ///Static interface to GN optimization using default ordering and thresholds - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param verbosity Integer specifying how much output to provide - ///@return an optimized values structure - /// - static shared_values optimizeGN(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.gaussNewton(); - return result.values(); - } - - /** - * Static interface to GN optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeGN(const G& graph, const Values& values, const Parameters parameters = Parameters()) { - return optimizeGN(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } }; -/** - * Check convergence - */ -bool check_convergence ( - double relativeErrorTreshold, - double absoluteErrorTreshold, - double errorThreshold, - double currentError, double newError, int verbosity); -///TODO: comment -bool check_convergence ( - const NonlinearOptimizationParameters ¶meters, - double currentError, double newError); +/** + * This is the abstract interface for classes that can optimize for the + * maximum-likelihood estimate of a NonlinearFactorGraph. + * + * To use a class derived from this interface, construct the class with a + * NonlinearFactorGraph and an initial variable assignment. Next, call the + * optimize() method, which returns a new NonlinearOptimizer object containing + * the optimized variable assignment. Call the values() method to retrieve the + * optimized estimate. + * + * Note: This class is immutable, optimize() and iterate() return new + * NonlinearOptimizer objects, so be sure to use the returned object and not + * simply keep the unchanged original. + * + * Example: + * \code +NonlinearOptimizer::shared_ptr optimizer = DoglegOptimizer::Create(graph, initialValues); +optimizer = optimizer->optimizer(); +Values result = optimizer->values(); +useTheResult(result); +\endcode + * + * Equivalent, but more compact: + * \code +useTheResult(DoglegOptimizer(graph, initialValues).optimize()->values()); +\endcode + * + * This interface also exposes an iterate() method, which performs one + * iteration, returning a NonlinearOptimizer containing the adjusted variable + * assignment. The optimize() method simply calls iterate() multiple times, + * until the error changes less than a threshold. 
We expose iterate() so that + * you can easily control what happens between iterations, such as drawing or + * printing, moving points from behind the camera to in front, etc. + * + * To modify the graph, values, or parameters between iterations, call the + * update() functions, which preserve all other state (for example, the trust + * region size in DoglegOptimizer). Derived optimizer classes also have + * additional update methods, not in this abstract interface, for updating + * algorithm-specific state. + * + * For more flexibility, since all functions are virtual, you may override them + * in your own derived class. + */ +class NonlinearOptimizer { + +public: + + /** An auto pointer to this class */ + typedef std::auto_ptr auto_ptr; + + /** A const shared_ptr to a NonlinearFactorGraph */ + typedef boost::shared_ptr SharedGraph; + + /** A const shared_ptr to a NonlinearFactorGraph */ + typedef boost::shared_ptr SharedValues; + + /** A const shared_ptr to the parameters */ + typedef boost::shared_ptr SharedParams; + +protected: + + const SharedGraph graph_; + const SharedValues values_; + const SharedParams params_; + const double error_; + const int iterations_; + + NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedParams& params) : + graph_(graph), values_(values), params_(params), + error_(graph_->error(*values_)), iterations_(0) {} + + NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedParams& params, double error, int iterations) : + graph_(graph), values_(values), params_(params), + error_(error), iterations_(iterations) {} + +public: + + /// @name Standard interface + /// @{ + + /** Optimize for the maximum-likelihood estimate, returning a new + * NonlinearOptimizer class containing the optimized variable assignments, + * which may be retrieved with values(). + */ + virtual auto_ptr optimize() const { return defaultOptimize(); } + + /** Retrieve the current variable assignment estimate. */ + virtual const SharedValues& values() const { return values_; } + + /** Retrieve the parameters. */ + virtual const SharedParams& params() const { return params_; } + + /** Return the current factor graph error */ + virtual double error() const { return error_; } + + /** Return the number of iterations that have been performed */ + virtual int iterations() const { return iterations_; } + + /// @} + + /// @name Advanced interface + /// @{ + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). + */ + virtual auto_ptr iterate() const = 0; + + /** Update the nonlinear factor graph, leaving all other state the same. + * Returns a new updated NonlinearOptimzier object, the original is not + * modified. + */ + virtual auto_ptr update(const SharedGraph& newGraph) const = 0; + + /** Update the variable assignments, leaving all other state the same. + * Returns a new updated NonlinearOptimzier object, the original is not + * modified. + */ + virtual auto_ptr update(const SharedValues& newValues) const = 0; + + /** Update the parameters, leaving all other state the same. + * Returns a new updated NonlinearOptimzier object, the original is not + * modified. 
+ */ + virtual auto_ptr update(const SharedParams& newParams) const = 0; + + /** Create a copy of the NonlinearOptimizer */ + virtual auto_ptr clone() const = 0; + + /// @} + +protected: + + /** A default implementation of the optimization loop, which calls iterate() + * until checkConvergence returns true. + */ + auto_ptr defaultOptimize() const; + +}; + +/** Check whether the relative error decrease is less than relativeErrorTreshold, + * the absolute error decrease is less than absoluteErrorTreshold, or + * the error itself is less than errorThreshold. + */ +bool checkConvergence(double relativeErrorTreshold, + double absoluteErrorTreshold, double errorThreshold, + double currentError, double newError, NonlinearOptimizerParams::Verbosity verbosity); } // gtsam - -/// @} - -#include
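
Illustration (not part of the patch): the sketch below shows how a concrete optimizer might derive from the new NonlinearOptimizer base class, overriding iterate(), the three update() overloads and clone(), and letting the inherited defaultOptimize() loop drive the iterations. "ExampleOptimizer" is a hypothetical name, its iterate() takes no real step, and the update() bodies are illustrative assumptions; only the base-class interface declared above comes from this patch.

    #include <gtsam/nonlinear/NonlinearOptimizer.h>

    using namespace gtsam;

    // Hypothetical derived optimizer; the actual step computation is elided.
    class ExampleOptimizer : public NonlinearOptimizer {
    public:

      ExampleOptimizer(const SharedGraph& graph, const SharedValues& values,
          const SharedParams& params) :
        NonlinearOptimizer(graph, values, params) {}

      // One iteration: compute an updated estimate and return a *new* optimizer
      // holding it; the current object is left untouched (functional style).
      virtual auto_ptr iterate() const {
        // A real solver would linearize graph_ around *values_ and solve for a step.
        SharedValues newValues = values_; // placeholder: no actual step is taken here
        return auto_ptr(new ExampleOptimizer(graph_, newValues, params_,
            graph_->error(*newValues), iterations_ + 1));
      }

      // The update() overloads swap in one piece of state and keep the rest.
      virtual auto_ptr update(const SharedGraph& newGraph) const {
        return auto_ptr(new ExampleOptimizer(newGraph, values_, params_,
            newGraph->error(*values_), iterations_));
      }
      virtual auto_ptr update(const SharedValues& newValues) const {
        return auto_ptr(new ExampleOptimizer(graph_, newValues, params_,
            graph_->error(*newValues), iterations_));
      }
      virtual auto_ptr update(const SharedParams& newParams) const {
        return auto_ptr(new ExampleOptimizer(graph_, values_, newParams,
            error_, iterations_));
      }

      virtual auto_ptr clone() const {
        return auto_ptr(new ExampleOptimizer(*this));
      }

    protected:

      // Forwards to the protected base constructor that also sets error and iterations.
      ExampleOptimizer(const SharedGraph& graph, const SharedValues& values,
          const SharedParams& params, double error, int iterations) :
        NonlinearOptimizer(graph, values, params, error, iterations) {}
    };

    // Usage follows the pattern documented in the header:
    //   ExampleOptimizer optimizer(graph, initialValues, params);
    //   NonlinearOptimizer::auto_ptr result = optimizer.optimize(); // runs defaultOptimize()
    //   result->values()->print("final values");

Because all state is held by const shared pointer and every call returns a fresh object, intermediate optimizer states (for example, per-iteration values) stay valid after later iterations.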
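
The stopping test that defaultOptimize() evaluates after each iteration can also be exercised directly; the call below is hypothetical (same includes as above) and uses the documented parameter defaults (relativeErrorTol = 1e-5, absoluteErrorTol = 0, errorTol = 0):

    // Exercising the convergence check used inside defaultOptimize().
    bool converged = gtsam::checkConvergence(
        1e-5,      // relativeErrorTreshold (default relativeErrorTol)
        0.0,       // absoluteErrorTreshold (default absoluteErrorTol)
        0.0,       // errorThreshold        (default errorTol)
        2.0,       // currentError
        1.99999,   // newError
        gtsam::NonlinearOptimizerParams::SILENT);
    // The relative decrease (2.0 - 1.99999) / 2.0 = 5e-6 falls below 1e-5, so this
    // should report convergence; the absolute and total-error tests do not fire here.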