From 0309b6b1849443f318b29e2245e0f8b5f41d402d Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sun, 26 Feb 2012 02:44:58 +0000 Subject: [PATCH 01/57] (in branch) NonlinearOptimizer base class --- gtsam/nonlinear/NonlinearOptimization-inl.h | 134 ---- gtsam/nonlinear/NonlinearOptimization.h | 55 -- .../NonlinearOptimizationParameters.cpp | 121 ---- .../NonlinearOptimizationParameters.h | 138 ---- gtsam/nonlinear/NonlinearOptimizer.cpp | 54 +- gtsam/nonlinear/NonlinearOptimizer.h | 603 ++++++------------ 6 files changed, 222 insertions(+), 883 deletions(-) delete mode 100644 gtsam/nonlinear/NonlinearOptimization-inl.h delete mode 100644 gtsam/nonlinear/NonlinearOptimization.h delete mode 100644 gtsam/nonlinear/NonlinearOptimizationParameters.cpp delete mode 100644 gtsam/nonlinear/NonlinearOptimizationParameters.h diff --git a/gtsam/nonlinear/NonlinearOptimization-inl.h b/gtsam/nonlinear/NonlinearOptimization-inl.h deleted file mode 100644 index 361de59e2..000000000 --- a/gtsam/nonlinear/NonlinearOptimization-inl.h +++ /dev/null @@ -1,134 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimization-inl.h - * @date Oct 17, 2010 - * @author Kai Ni - * @brief Easy interfaces for NonlinearOptimizer - */ - -#pragma once - -#include -#include - -#if ENABLE_SPCG -#include -#endif - -#include - -using namespace std; - -namespace gtsam { - - /** - * The Elimination solver - */ - template - Values optimizeSequential(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters, bool useLM) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - - // Create an nonlinear Optimizer that uses a Sequential Solver - typedef NonlinearOptimizer Optimizer; - Optimizer optimizer(boost::make_shared(graph), - boost::make_shared(initialEstimate), ordering, - boost::make_shared(parameters)); - - // Now optimize using either LM or GN methods. 
- if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } - - /** - * The multifrontal solver - */ - template - Values optimizeMultiFrontal(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters, bool useLM) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - - // Create an nonlinear Optimizer that uses a Multifrontal Solver - typedef NonlinearOptimizer Optimizer; - Optimizer optimizer(boost::make_shared(graph), - boost::make_shared(initialEstimate), ordering, - boost::make_shared(parameters)); - - // now optimize using either LM or GN methods - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } - -#if ENABLE_SPCG - /** - * The sparse preconditioned conjugate gradient solver - */ - template - Values optimizeSPCG(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(), - bool useLM = true) { - - // initial optimization state is the same in both cases tested - typedef SubgraphSolver Solver; - typedef boost::shared_ptr shared_Solver; - typedef NonlinearOptimizer SPCGOptimizer; - shared_Solver solver = boost::make_shared( - graph, initialEstimate, IterativeOptimizationParameters()); - SPCGOptimizer optimizer( - boost::make_shared(graph), - boost::make_shared(initialEstimate), - solver->ordering(), - solver, - boost::make_shared(parameters)); - - // choose nonlinear optimization method - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } -#endif - - /** - * optimization that returns the values - */ - template - Values optimize(const G& graph, const Values& initialEstimate, const NonlinearOptimizationParameters& parameters, - const LinearSolver& solver, - const NonlinearOptimizationMethod& nonlinear_method) { - switch (solver) { - case SEQUENTIAL: - return optimizeSequential(graph, initialEstimate, parameters, - nonlinear_method == LM); - case MULTIFRONTAL: - return optimizeMultiFrontal(graph, initialEstimate, parameters, - nonlinear_method == LM); -#if ENABLE_SPCG - case SPCG: -// return optimizeSPCG(graph, initialEstimate, parameters, -// nonlinear_method == LM); - throw runtime_error("optimize: SPCG not supported yet due to the specific pose constraint"); -#endif - } - throw runtime_error("optimize: undefined solver"); - } - -} //namespace gtsam diff --git a/gtsam/nonlinear/NonlinearOptimization.h b/gtsam/nonlinear/NonlinearOptimization.h deleted file mode 100644 index 6521ded8e..000000000 --- a/gtsam/nonlinear/NonlinearOptimization.h +++ /dev/null @@ -1,55 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimization.h - * @date Oct 14, 2010 - * @author Kai Ni - * @brief Easy interfaces for NonlinearOptimizer - */ - -#pragma once - -#include - -namespace gtsam { - - /** - * all the nonlinear optimization methods - */ - typedef enum { - LM, // Levenberg Marquardt - GN // Gaussian-Newton - } NonlinearOptimizationMethod; - - /** - * all the linear solver types - */ - typedef enum { - SEQUENTIAL, // Sequential elimination - MULTIFRONTAL, // Multi-frontal elimination -#if ENABLE_SPCG - SPCG, // Subgraph Preconditioned Conjugate Gradient -#endif - } LinearSolver; - - /** - * optimization that returns the values - */ - template - Values optimize(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(), - const LinearSolver& solver = MULTIFRONTAL, - const NonlinearOptimizationMethod& nonlinear_method = LM); - -} - -#include diff --git a/gtsam/nonlinear/NonlinearOptimizationParameters.cpp b/gtsam/nonlinear/NonlinearOptimizationParameters.cpp deleted file mode 100644 index e80c3b59c..000000000 --- a/gtsam/nonlinear/NonlinearOptimizationParameters.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizationParameters.cpp - * @date Jan 28, 2012 - * @author Alex Cunningham - * @brief Implements parameters structure - */ - -#include - -#include - -#include - -namespace gtsam { - -using namespace std; - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters() : - absDecrease_(1e-6), relDecrease_(1e-6), sumError_(0.0), maxIterations_( - 100), lambda_(1e-5), lambdaFactor_(10.0), verbosity_(SILENT), lambdaMode_( - BOUNDED), useQR_(false) { -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters(double absDecrease, double relDecrease, - double sumError, int iIters, double lambda, - double lambdaFactor, verbosityLevel v, - LambdaMode lambdaMode, bool useQR) : - absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_( - sumError), maxIterations_(iIters), lambda_(lambda), lambdaFactor_( - lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode), useQR_(useQR) { -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters( - const NonlinearOptimizationParameters ¶meters) : - absDecrease_(parameters.absDecrease_), relDecrease_( - parameters.relDecrease_), sumError_(parameters.sumError_), maxIterations_( - parameters.maxIterations_), lambda_(parameters.lambda_), lambdaFactor_( - parameters.lambdaFactor_), verbosity_(parameters.verbosity_), lambdaMode_( - parameters.lambdaMode_), useQR_(parameters.useQR_) { -} - -/* ************************************************************************* */ -void NonlinearOptimizationParameters::print(const std::string& s) 
const { - cout << "NonlinearOptimizationParameters " << s << endl; - cout << "absolute decrease threshold: " << absDecrease_ << endl; - cout << "relative decrease threshold: " << relDecrease_ << endl; - cout << " error sum threshold: " << sumError_ << endl; - cout << " maximum nr. of iterations: " << maxIterations_ << endl; - cout << " initial lambda value: " << lambda_ << endl; - cout << " factor to multiply lambda: " << lambdaFactor_ << endl; - cout << " verbosity level: " << verbosity_ << endl; - cout << " lambda mode: " << lambdaMode_ << endl; - cout << " use QR: " << useQR_ << endl; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newLambda_(double lambda) const { - shared_ptr ptr( - boost::make_shared < NonlinearOptimizationParameters > (*this)); - ptr->lambda_ = lambda; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newVerbosity(verbosityLevel verbosity) { - shared_ptr ptr(boost::make_shared()); - ptr->verbosity_ = verbosity; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newMaxIterations(int maxIterations) { - shared_ptr ptr(boost::make_shared()); - ptr->maxIterations_ = maxIterations; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newLambda(double lambda) { - shared_ptr ptr(boost::make_shared()); - ptr->lambda_ = lambda; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newDecreaseThresholds(double absDecrease, - double relDecrease) { - shared_ptr ptr(boost::make_shared()); - ptr->absDecrease_ = absDecrease; - ptr->relDecrease_ = relDecrease; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newFactorization(bool useQR) { - shared_ptr ptr(boost::make_shared()); - ptr->useQR_ = useQR; - return ptr; -} - -} // \namespace gtsam diff --git a/gtsam/nonlinear/NonlinearOptimizationParameters.h b/gtsam/nonlinear/NonlinearOptimizationParameters.h deleted file mode 100644 index 07e01ea58..000000000 --- a/gtsam/nonlinear/NonlinearOptimizationParameters.h +++ /dev/null @@ -1,138 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizationParameters.h - * @date Oct 14, 2010 - * @author Kai Ni - * @brief structure to store parameters for nonlinear optimization - */ - -#pragma once - -#include -#include -#include - -namespace gtsam { - - /** - * a container for all related parameters - * \nosubgrouping - */ - struct NonlinearOptimizationParameters { - - typedef boost::shared_ptr shared_ptr; - typedef NonlinearOptimizationParameters This; - - double absDecrease_; ///< threshold for the absolute decrease per iteration - - /** - * Relative decrease threshold, where relative error = (new-current)/current. - * This can be set to 0 if there is a possibility for negative error values. - */ - double relDecrease_; ///< threshold for the relative decrease per iteration - - double sumError_; ///< threshold for the sum of error - size_t maxIterations_; ///< maximum number of iterations - double lambda_; ///< starting lambda value - double lambdaFactor_; ///< factor by which lambda is multiplied - - /// verbosity levels - typedef enum { - SILENT, - ERROR, - LAMBDA, - TRYLAMBDA, - VALUES, - DELTA, - TRYCONFIG, - TRYDELTA, - LINEAR, - DAMPED - } verbosityLevel; - verbosityLevel verbosity_; ///< verbosity - - /// trust-region method mode - typedef enum { - FAST, BOUNDED, CAUTIOUS - } LambdaMode; - LambdaMode lambdaMode_; /// - void serialize(ARCHIVE & ar, const unsigned int version) - { - ar & BOOST_SERIALIZATION_NVP(absDecrease_); - ar & BOOST_SERIALIZATION_NVP(relDecrease_); - ar & BOOST_SERIALIZATION_NVP(sumError_); - ar & BOOST_SERIALIZATION_NVP(maxIterations_); - ar & BOOST_SERIALIZATION_NVP(lambda_); - ar & BOOST_SERIALIZATION_NVP(lambdaFactor_); - ar & BOOST_SERIALIZATION_NVP(verbosity_); - ar & BOOST_SERIALIZATION_NVP(lambdaMode_); - ar & BOOST_SERIALIZATION_NVP(useQR_); - } - }; -} diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 4d7d76bfc..e0c10bfe9 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -24,22 +24,50 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ +auto_ptr NonlinearOptimizer::defaultOptimize() const { -bool check_convergence ( - const NonlinearOptimizationParameters ¶meters, - double currentError, double newError) { - return check_convergence(parameters.relDecrease_, - parameters.absDecrease_, - parameters.sumError_, - currentError, newError, - parameters.verbosity_) ; + double currentError = this->error(); + + // check if we're already close enough + if(currentError <= params_->errorTol) { + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) + cout << "Exiting, as error = " << currentError << " < " << errorTol << endl; + return this->clone(); + } + + // Return if we already have too many iterations + if(this->iterations() >= params_->maxIterations) + return this->clone(); + + // Iterative loop + auto_ptr next = this->iterate(); // First iteration happens here + while(next->iterations() < params_->maxIterations && + !checkConvergence(params_->relativeErrorTol, params_->absoluteErrorTol, + params_->errorTol, currentError, next->error(), params_->verbosity)) { + + // Do next iteration + currentError = next->error(); + next = next->iterate(); + } + + // Printing if verbose + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) + 
next->values()->print("final values"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR && + next->iterations() >= params_->maxIterations) + cout << "Terminating because reached maximum iterations" << endl; + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) + cout << "final error: " << next->error() << endl; + + // Return optimizer from final iteration + return next; } -bool check_convergence( - double relativeErrorTreshold, - double absoluteErrorTreshold, - double errorThreshold, - double currentError, double newError, int verbosity) { +/* ************************************************************************* */ +bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold, + double errorThreshold, double currentError, double newError, + NonlinearOptimizerParams::Verbosity verbosity) { if ( verbosity >= 2 ) { if ( newError <= errorThreshold ) diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 4e41649e6..7981146d7 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -18,442 +18,201 @@ #pragma once -#include #include #include namespace gtsam { -class NullOptimizerWriter { -public: - NullOptimizerWriter(double error) {} ///Contructor - virtual ~NullOptimizerWriter() {} - virtual void write(double error) {} ///Capturing the values of the parameters after the optimization -}; /// - -/** - * The class NonlinearOptimizer encapsulates an optimization state. - * Typically it is instantiated with a NonlinearFactorGraph and initial values - * and then one of the optimization routines is called. These iterate - * until convergence. All methods are functional and return a new state. - * - * The class is parameterized by the Graph type $G$, Values class type $Values$, - * linear system class $L$, the non linear solver type $S$, and the writer type $W$ - * - * The values class type $Values$ is in order to be able to optimize over non-vector values structures. - * - * A nonlinear system solver $S$ - * Concept NonLinearSolver implements - * linearize: G * Values -> L - * solve : L -> Values - * - * The writer $W$ generates output to disk or the screen. - * - * For example, in a 2D case, $G$ can be pose2SLAM::Graph, $Values$ can be Pose2Values, - * $L$ can be GaussianFactorGraph and $S$ can be Factorization. - * The solver class has two main functions: linearize and optimize. The first one - * linearizes the nonlinear cost function around the current estimate, and the second - * one optimizes the linearized system using various methods. - * - * To use the optimizer in code, include in your cpp file - * \nosubgrouping +/** The common parameters for Nonlinear optimizers. Most optimizers + * deriving from NonlinearOptimizer also subclass the parameters. 
*/ -template -class NonlinearOptimizer { +class NonlinearOptimizerParams { public: - // For performance reasons in recursion, we store values in a shared_ptr - typedef boost::shared_ptr shared_values; ///Prevent memory leaks in Values - typedef boost::shared_ptr shared_graph; /// Prevent memory leaks in Graph - typedef boost::shared_ptr shared_linear; /// Not sure - typedef boost::shared_ptr shared_ordering; ///ordering parameters - typedef boost::shared_ptr shared_solver; /// Solver - typedef NonlinearOptimizationParameters Parameters; ///These take the parameters defined in NonLinearOptimizationParameters.h - typedef boost::shared_ptr shared_parameters ; /// - typedef boost::shared_ptr shared_structure; // TODO: make this const + /** Control the printing verbosity */ + enum Verbosity { + SILENT, + ERROR, + LAMBDA, + TRYLAMBDA, + VALUES, + DELTA, + TRYCONFIG, + TRYDELTA, + LINEAR, + DAMPED + }; -private: + int maxIterations; ///< The maximum iterations to stop iterating (default 100) + double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) + double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 0.0) + double errorTol; ///< The maximum total error to stop iterating (default 0.0) + Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) - typedef NonlinearOptimizer This; - typedef boost::shared_ptr > shared_dimensions; + NonlinearOptimizerParams() : + maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(0.0), + errorTol(0.0), verbosity(SILENT) {} - /// keep a reference to const version of the graph - /// These normally do not change - const shared_graph graph_; - - // keep a values structure and its error - // These typically change once per iteration (in a functional way) - shared_values values_; - - // current error for this state - double error_; - - // the variable ordering - const shared_ordering ordering_; - - // storage for parameters, lambda, thresholds, etc. - shared_parameters parameters_; - - // for performance track - size_t iterations_; - - // The dimensions of each linearized variable - const shared_dimensions dimensions_; - - // storage of structural components that don't change between iterations - // used at creation of solvers in each iteration - // TODO: make this structure component specific to solver type - const shared_structure structure_; - - // solver used only for SPCG - // FIXME: remove this! 
- shared_solver spcg_solver_; - - /// @name Advanced Constructors - /// @{ - - /** - * Constructor that does not do any computation - */ - NonlinearOptimizer(shared_graph graph, shared_values values, const double error, - shared_ordering ordering, shared_parameters parameters, shared_dimensions dimensions, - size_t iterations, shared_structure structure) - : graph_(graph), values_(values), error_(error), ordering_(ordering), - parameters_(parameters), iterations_(iterations), dimensions_(dimensions), - structure_(structure) {} - - /** constructors to replace specific components */ - - ///TODO: comment - This newValues_(shared_values newValues) const { - return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), - ordering_, parameters_, dimensions_, iterations_, structure_); } - - ///TODO: comment - This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const { - return NonlinearOptimizer(graph_, newValues, newError, ordering_, - parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newIterations_(int iterations) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters_, dimensions_, - iterations, structure_); } - - ///TODO: comment - This newLambda_(double newLambda) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, - parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newValuesLambda_(shared_values newValues, double newLambda) const { - return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), - ordering_, parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newParameters_(shared_parameters parameters) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters, dimensions_, - iterations_, structure_); } - - /// @} - -public: - - /// @name Standard Constructors - /// @{ - - /** - * Constructor that evaluates new error - */ - NonlinearOptimizer(shared_graph graph, - shared_values values, - shared_ordering ordering, - shared_parameters parameters = boost::make_shared()); - - /** - * Constructor that takes a solver directly - only useful for SPCG - * FIXME: REMOVE THIS FUNCTION! 
- */ - NonlinearOptimizer(shared_graph graph, - shared_values values, - shared_ordering ordering, - shared_solver spcg_solver, - shared_parameters parameters = boost::make_shared()); - - /** - * Copy constructor - */ - NonlinearOptimizer(const NonlinearOptimizer &optimizer) : - graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_), - ordering_(optimizer.ordering_), parameters_(optimizer.parameters_), - iterations_(optimizer.iterations_), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {} - - // access to member variables - - /// @} - /// @name Standard Interface - /// @{ - - /// print - void print(const std::string& s="") const { - cout << "NonlinearOptimizer " << s << endl; - cout << " current error: " << error_ << endl; - cout << "iterations count: " << iterations_ << endl; - } - - /** Return current error */ - double error() const { return error_; } - - /** Return current lambda */ - double lambda() const { return parameters_->lambda_; } - - /** Return the values */ - shared_values values() const { return values_; } - - /** Return the graph */ - shared_graph graph() const { return graph_; } - - /** Return the itertions */ - size_t iterations() const { return iterations_; } - - /** Return the ordering */ - shared_ordering ordering() const { return ordering_; } - - /** Return the parameters */ - shared_parameters parameters() const { return parameters_; } - - /** Return the structure variable (variable index) */ - shared_structure structure() const { return structure_; } - - /** - * Return a linearized graph at the current graph/values/ordering - */ - shared_linear linearize() const { - return shared_linear(new L(*graph_->linearize(*values_, *ordering_))); - } - - /** - * create a solver around the current graph/values - * NOTE: this will actually solve a linear system - */ - shared_solver createSolver() const { - return shared_solver(new GS(linearize(), structure_, parameters_->useQR_)); - } - - /** - * Return mean and covariance on a single variable - */ - Matrix marginalCovariance(Key j) const { - return createSolver()->marginalCovariance((*ordering_)[j]); - } - - /** - * linearize and optimize - * This returns an VectorValues, i.e., vectors in tangent space of Values - */ - VectorValues linearizeAndOptimizeForDelta() const { - return *createSolver()->optimize(); - } - - /** - * Do one Gauss-Newton iteration and return next state - * suggested interface - */ - NonlinearOptimizer iterate() const; - - /// - ///Optimize a solution for a non linear factor graph - ///param relativeTreshold - ///@param absoluteTreshold - ///@param verbosity Integer specifying how much output to provide - /// - - // suggested interface - NonlinearOptimizer gaussNewton() const; - - /// @} - /// @name Advanced Interface - /// @{ - - /** Recursively try to do tempered Gauss-Newton steps until we succeed */ - NonlinearOptimizer try_lambda(const L& linear); - - /** - * One iteration of Levenberg Marquardt - */ - NonlinearOptimizer iterateLM(); - - /// - ///Optimize using Levenberg-Marquardt. Really Levenberg's - ///algorithm at this moment, as we just add I*\lambda to Hessian - ///H'H. The probabilistic explanation is very simple: every - ///variable gets an extra Gaussian prior that biases staying at - ///current value, with variance 1/lambda. This is done very easily - ///(but perhaps wastefully) by adding a prior factor for each of - ///the variables, after linearization. 
- /// - ///@param relativeThreshold - ///@param absoluteThreshold - ///@param verbosity Integer specifying how much output to provide - ///@param lambdaFactor Factor by which to decrease/increase lambda - /// - NonlinearOptimizer levenbergMarquardt(); - - /** - * One iteration of the dog leg algorithm - */ - NonlinearOptimizer iterateDogLeg(); - - /** - * Optimize using the Dog Leg algorithm - */ - NonlinearOptimizer dogLeg(); - - // static interfaces to LM, Dog leg, and GN optimization techniques - - ///Static interface to Dog leg optimization using default ordering - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param parameters Optimization parameters - ///@return an optimized values structure - static shared_values optimizeLM(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - //GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.levenbergMarquardt(); - return result.values(); - } - - ///TODO: comment - static shared_values optimizeLM(shared_graph graph, - shared_values values, - Parameters::verbosityLevel verbosity) { - return optimizeLM(graph, values, Parameters::newVerbosity(verbosity)); - } - - /** - * Static interface to LM optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeLM(const G& graph, - const Values& values, - const Parameters parameters = Parameters()) { - return optimizeLM(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } - - ///TODO: comment - static shared_values optimizeLM(const G& graph, - const Values& values, - Parameters::verbosityLevel verbosity) { - return optimizeLM(boost::make_shared(graph), - boost::make_shared(values), - verbosity); - } - - ///Static interface to Dog leg optimization using default ordering - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param parameters Optimization parameters - ///@return an optimized values structure - static shared_values optimizeDogLeg(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - //GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.dogLeg(); - return result.values(); + void print(const std::string& str = "") const { + cout << s << "\n"; + cout << "relative decrease threshold: " << relativeErrorTol << "\n"; + cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; + cout << " total error threshold: " << errorTol << "\n"; + cout << " maximum iterations: " << maxIterations << "\n"; + cout << " verbosity level: " << verbosity << "\n"; } - - /// - ///Static interface to Dog leg optimization using default ordering and thresholds - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param verbosity Integer specifying how much output to provide - ///@return an optimized values structure - /// - static shared_values optimizeDogLeg(shared_graph 
graph, - shared_values values, - Parameters::verbosityLevel verbosity) { - return optimizeDogLeg(graph, values, Parameters::newVerbosity(verbosity)->newLambda_(1.0)); - } - - /** - * Static interface to Dogleg optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeDogLeg(const G& graph, - const Values& values, - const Parameters parameters = Parameters()) { - return optimizeDogLeg(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } - - ///TODO: comment - static shared_values optimizeDogLeg(const G& graph, - const Values& values, - Parameters::verbosityLevel verbosity) { - return optimizeDogLeg(boost::make_shared(graph), - boost::make_shared(values), - verbosity); - } - - /// - ///Static interface to GN optimization using default ordering and thresholds - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param verbosity Integer specifying how much output to provide - ///@return an optimized values structure - /// - static shared_values optimizeGN(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.gaussNewton(); - return result.values(); - } - - /** - * Static interface to GN optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeGN(const G& graph, const Values& values, const Parameters parameters = Parameters()) { - return optimizeGN(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } }; -/** - * Check convergence - */ -bool check_convergence ( - double relativeErrorTreshold, - double absoluteErrorTreshold, - double errorThreshold, - double currentError, double newError, int verbosity); -///TODO: comment -bool check_convergence ( - const NonlinearOptimizationParameters ¶meters, - double currentError, double newError); +/** + * This is the abstract interface for classes that can optimize for the + * maximum-likelihood estimate of a NonlinearFactorGraph. + * + * To use a class derived from this interface, construct the class with a + * NonlinearFactorGraph and an initial variable assignment. Next, call the + * optimize() method, which returns a new NonlinearOptimizer object containing + * the optimized variable assignment. Call the values() method to retrieve the + * optimized estimate. + * + * Note: This class is immutable, optimize() and iterate() return new + * NonlinearOptimizer objects, so be sure to use the returned object and not + * simply keep the unchanged original. + * + * Example: + * \code +NonlinearOptimizer::shared_ptr optimizer = DoglegOptimizer::Create(graph, initialValues); +optimizer = optimizer->optimizer(); +Values result = optimizer->values(); +useTheResult(result); +\endcode + * + * Equivalent, but more compact: + * \code +useTheResult(DoglegOptimizer(graph, initialValues).optimize()->values()); +\endcode + * + * This interface also exposes an iterate() method, which performs one + * iteration, returning a NonlinearOptimizer containing the adjusted variable + * assignment. The optimize() method simply calls iterate() multiple times, + * until the error changes less than a threshold. 
We expose iterate() so that + * you can easily control what happens between iterations, such as drawing or + * printing, moving points from behind the camera to in front, etc. + * + * To modify the graph, values, or parameters between iterations, call the + * update() functions, which preserve all other state (for example, the trust + * region size in DoglegOptimizer). Derived optimizer classes also have + * additional update methods, not in this abstract interface, for updating + * algorithm-specific state. + * + * For more flexibility, since all functions are virtual, you may override them + * in your own derived class. + */ +class NonlinearOptimizer { + +public: + + /** An auto pointer to this class */ + typedef std::auto_ptr auto_ptr; + + /** A const shared_ptr to a NonlinearFactorGraph */ + typedef boost::shared_ptr SharedGraph; + + /** A const shared_ptr to a NonlinearFactorGraph */ + typedef boost::shared_ptr SharedValues; + + /** A const shared_ptr to the parameters */ + typedef boost::shared_ptr SharedParams; + +protected: + + const SharedGraph graph_; + const SharedValues values_; + const SharedParams params_; + const double error_; + const int iterations_; + + NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedParams& params) : + graph_(graph), values_(values), params_(params), + error_(graph_->error(*values_)), iterations_(0) {} + + NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedParams& params, double error, int iterations) : + graph_(graph), values_(values), params_(params), + error_(error), iterations_(iterations) {} + +public: + + /// @name Standard interface + /// @{ + + /** Optimize for the maximum-likelihood estimate, returning a new + * NonlinearOptimizer class containing the optimized variable assignments, + * which may be retrieved with values(). + */ + virtual auto_ptr optimize() const { return defaultOptimize(); } + + /** Retrieve the current variable assignment estimate. */ + virtual const SharedValues& values() const { return values_; } + + /** Retrieve the parameters. */ + virtual const SharedParams& params() const { return params_; } + + /** Return the current factor graph error */ + virtual double error() const { return error_; } + + /** Return the number of iterations that have been performed */ + virtual int iterations() const { return iterations_; } + + /// @} + + /// @name Advanced interface + /// @{ + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). + */ + virtual auto_ptr iterate() const = 0; + + /** Update the nonlinear factor graph, leaving all other state the same. + * Returns a new updated NonlinearOptimzier object, the original is not + * modified. + */ + virtual auto_ptr update(const SharedGraph& newGraph) const = 0; + + /** Update the variable assignments, leaving all other state the same. + * Returns a new updated NonlinearOptimzier object, the original is not + * modified. + */ + virtual auto_ptr update(const SharedValues& newValues) const = 0; + + /** Update the parameters, leaving all other state the same. + * Returns a new updated NonlinearOptimzier object, the original is not + * modified. 
+ */ + virtual auto_ptr update(const SharedParams& newParams) const = 0; + + /** Create a copy of the NonlinearOptimizer */ + virtual auto_ptr clone() const = 0; + + /// @} + +protected: + + /** A default implementation of the optimization loop, which calls iterate() + * until checkConvergence returns true. + */ + auto_ptr defaultOptimize() const; + +}; + +/** Check whether the relative error decrease is less than relativeErrorTreshold, + * the absolute error decrease is less than absoluteErrorTreshold, or + * the error itself is less than errorThreshold. + */ +bool checkConvergence(double relativeErrorTreshold, + double absoluteErrorTreshold, double errorThreshold, + double currentError, double newError, NonlinearOptimizerParams::Verbosity verbosity); } // gtsam - -/// @} - -#include From e7e64e945b8cb1527458b72982b3f2935b4ab231 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 27 Feb 2012 01:18:36 +0000 Subject: [PATCH 02/57] GaussNewtonOptimizer, started LevenbergMarquardtOptimizer --- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 68 ++++++++ gtsam/nonlinear/GaussNewtonOptimizer.h | 158 +++++++++++++++++ .../nonlinear/LevenbergMarquardtOptimizer.cpp | 68 ++++++++ gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 163 ++++++++++++++++++ gtsam/nonlinear/NonlinearOptimizer.h | 104 +++++------ tests/testNonlinearOptimizer.cpp | 5 +- 6 files changed, 516 insertions(+), 50 deletions(-) create mode 100644 gtsam/nonlinear/GaussNewtonOptimizer.cpp create mode 100644 gtsam/nonlinear/GaussNewtonOptimizer.h create mode 100644 gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp create mode 100644 gtsam/nonlinear/LevenbergMarquardtOptimizer.h diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp new file mode 100644 index 000000000..a372fae4e --- /dev/null +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -0,0 +1,68 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussNewtonOptimizer.cpp + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { + + // Linearize graph + GaussianFactorGraph::shared_ptr linear = graph_->linearize(values_, gnParams_->ordering); + + // Check whether to use QR + const bool useQR; + if(gnParams_->factorization == GaussNewtonParams::LDL) + useQR = false; + else if(gnParams_->factorization == GaussNewtonParams::QR) + useQR = true; + else + throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::factorization"); + + // Optimize + VectorValues::shared_ptr delta; + if(gnParams_->elimination == MULTIFRONTAL) + delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); + else if(gnParams_->elimination == SEQUENTIAL) + delta = GaussianSequentialSolver(*linear, useQR).optimize(); + else + throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination"); + + // Maybe show output + if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); + + // Update values + SharedValues newValues(new Values(values_->retract(*delta, gnParams_->ordering))); + double newError = graph_->error(newValues); + + // Maybe show output + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) newValues->print("newValues"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << newError << endl; + + // Create new optimizer with new values and new error + auto_ptr newOptimizer(new GaussNewtonOptimizer( + graph_, newValues, gnParams_, newError, iterations_+1)); + + return newOptimizer; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h new file mode 100644 index 000000000..df5a2f88f --- /dev/null +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -0,0 +1,158 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussNewtonOptimizer.h + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#pragma once + +#include + +namespace gtsam { + +/** Parameters for Gauss-Newton optimization, inherits from + * NonlinearOptimizationParams. 
+ */ +class GaussNewtonParams : public NonlinearOptimizerParams { +public: + /** See GaussNewtonParams::elimination */ + enum Elimination { + MULTIFRONTAL, + SEQUENTIAL + }; + + /** See GaussNewtonParams::factorization */ + enum Factorization { + LDL, + QR, + }; + + Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) + Factorization factorization; ///< The numerical factorization (default: LDL) + Ordering ordering; ///< The variable elimination ordering (default: empty -> COLAMD) + + GaussNewtonParams() : + elimination(MULTIFRONTAL), factorization(LDL) {} + + virtual void print(const std::string& str = "") const { + NonlinearOptimizerParams::print(str); + if(elimination == MULTIFRONTAL) + std::cout << " elimination method: MULTIFRONTAL\n"; + else if(elimination == SEQUENTIAL) + std::cout << " elimination method: SEQUENTIAL\n"; + else + std::cout << " elimination method: (invalid)\n"; + + if(factorization == LDL) + std::cout << " factorization method: LDL\n"; + else if(factorization == QR) + std::cout << " factorization method: QR\n"; + else if(factorization == CHOLESKY) + std::cout << " factorization method: CHOLESKY\n"; + else + std::cout << " factorization method: (invalid)\n"; + + std::cout.flush(); + } +}; + +/** + * This class performs Gauss-Newton nonlinear optimization + */ +class GaussNewtonOptimizer : public NonlinearOptimizer { + +public: + + typedef boost::shared_ptr SharedGNParams; + + /// @name Standard interface + /// @{ + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, + const GaussNewtonParams& params) : + NonlinearOptimizer( + SharedGraph(new NonlinearFactorGraph(graph)), + SharedValues(new Values(values)), + SharedGNParams(new GaussNewtonParams(params))), + gnParams_(boost::static_pointer_cast(params_)) {} + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + GaussNewtonOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedGNParams& params) : + NonlinearOptimizer(graph, values, params), gnParams_(params) {} + + /// @} + + /// @name Advanced interface + /// @{ + + /** Virtual destructor */ + virtual ~GaussNewtonOptimizer() {} + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). + */ + virtual auto_ptr iterate() const; + + /** Update the graph, values, and/or parameters, leaving all other state + * the same. Any of these that are empty shared pointers are left unchanged + * in the returned optimizer object. Returns a new updated + * NonlinearOptimzier object, the original is not modified. 
+ */ + virtual auto_ptr update( + const SharedGraph& newGraph = SharedGraph(), + const SharedValues& newValues = SharedValues(), + const SharedParams& newParams = SharedParams()) const { + return new GaussNewtonOptimizer(*this, newGraph, newValues, + boost::dynamic_pointer_cast(newParams)); + } + + /** Create a copy of the NonlinearOptimizer */ + virtual auto_ptr clone() const { + return new GaussNewtonOptimizer(*this); + } + + /// @} + +protected: + + const SharedGNParams gnParams_; + + GaussNewtonOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedGNParams& params, double error, int iterations) : + NonlinearOptimizer(graph, values, params, error, iterations), gnParams_(params) {} + + GaussNewtonOptimizer(const GaussNewtonOptimizer& original, const SharedGraph& newGraph, + const SharedValues& newValues, const SharedGNParams& newParams) : + NonlinearOptimizer(original, newGraph, newValues, newParams), + gnParams_(newParams ? newParams : original.gnParams_) {} + +}; + +} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp new file mode 100644 index 000000000..270778afe --- /dev/null +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -0,0 +1,68 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file LevenbergMarquardtOptimizer.cpp + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { + + // Linearize graph + GaussianFactorGraph::shared_ptr linear = graph_->linearize(values_, gnParams_->ordering); + + // Check whether to use QR + const bool useQR; + if(gnParams_->factorization == LevenbergMarquardtParams::LDL) + useQR = false; + else if(gnParams_->factorization == LevenbergMarquardtParams::QR) + useQR = true; + else + throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization"); + + // Optimize + VectorValues::shared_ptr delta; + if(gnParams_->elimination == MULTIFRONTAL) + delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); + else if(gnParams_->elimination == SEQUENTIAL) + delta = GaussianSequentialSolver(*linear, useQR).optimize(); + else + throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); + + // Maybe show output + if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); + + // Update values + SharedValues newValues(new Values(values_->retract(*delta, gnParams_->ordering))); + double newError = graph_->error(newValues); + + // Maybe show output + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) newValues->print("newValues"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << newError << endl; + + // Create new optimizer with new values and new error + auto_ptr newOptimizer(new LevenbergMarquardtOptimizer( + graph_, newValues, gnParams_, newError, iterations_+1)); + + return newOptimizer; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h 
b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h new file mode 100644 index 000000000..83c4ebe88 --- /dev/null +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -0,0 +1,163 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file LevenbergMarquardtOptimizer.h + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#pragma once + +#include + +namespace gtsam { + +/** Parameters for Levenberg-Marquardt optimization, inherits from + * NonlinearOptimizationParams. + */ +class LevenbergMarquardtParams : public NonlinearOptimizerParams { +public: + /** See LevenbergMarquardtParams::elimination */ + enum Elimination { + MULTIFRONTAL, + SEQUENTIAL + }; + + /** See LevenbergMarquardtParams::factorization */ + enum Factorization { + LDL, + QR, + }; + + Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) + Factorization factorization; ///< The numerical factorization (default: LDL) + Ordering::shared_ptr ordering; ///< The variable elimination ordering (default: empty -> COLAMD) + double lambda; ///< The initial (and current after each iteration) Levenberg-Marquardt damping term (default: 1e-5) + double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) + + LevenbergMarquardtParams() : + elimination(MULTIFRONTAL), factorization(LDL), lambda(1e-5), lambdaFactor(10.0) {} + + virtual void print(const std::string& str = "") const { + NonlinearOptimizerParams::print(str); + if(elimination == MULTIFRONTAL) + std::cout << " elimination method: MULTIFRONTAL\n"; + else if(elimination == SEQUENTIAL) + std::cout << " elimination method: SEQUENTIAL\n"; + else + std::cout << " elimination method: (invalid)\n"; + + if(factorization == LDL) + std::cout << " factorization method: LDL\n"; + else if(factorization == QR) + std::cout << " factorization method: QR\n"; + else if(factorization == CHOLESKY) + std::cout << " factorization method: CHOLESKY\n"; + else + std::cout << " factorization method: (invalid)\n"; + + std::cout << " lambda: " << lambda << "\n"; + std::cout << " lambdaFactor: " << lambdaFactor << "\n"; + + std::cout.flush(); + } +}; + +/** + * This class performs Levenberg-Marquardt nonlinear optimization + */ +class LevenbergMarquardtOptimizer : public NonlinearOptimizer { + +public: + + typedef boost::shared_ptr SharedLMParams; + + /// @name Standard interface + /// @{ + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. 
+ * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& values, + const LevenbergMarquardtParams& params) : + NonlinearOptimizer( + SharedGraph(new NonlinearFactorGraph(graph)), + SharedValues(new Values(values)), + SharedLMParams(new LevenbergMarquardtParams(params))), + lmParams_(boost::static_pointer_cast(params_)) {} + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + LevenbergMarquardtOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedLMParams& params) : + NonlinearOptimizer(graph, values, params), lmParams_(params) {} + + /// @} + + /// @name Advanced interface + /// @{ + + /** Virtual destructor */ + virtual ~LevenbergMarquardtOptimizer() {} + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). + */ + virtual auto_ptr iterate() const; + + /** Update the graph, values, and/or parameters, leaving all other state + * the same. Any of these that are empty shared pointers are left unchanged + * in the returned optimizer object. Returns a new updated + * NonlinearOptimzier object, the original is not modified. + */ + virtual auto_ptr update( + const SharedGraph& newGraph = SharedGraph(), + const SharedValues& newValues = SharedValues(), + const SharedParams& newParams = SharedParams()) const { + return new LevenbergMarquardtOptimizer(*this, newGraph, newValues, + boost::dynamic_pointer_cast(newParams)); + } + + /** Create a copy of the NonlinearOptimizer */ + virtual auto_ptr clone() const { + return new LevenbergMarquardtOptimizer(*this); + } + + /// @} + +protected: + + const SharedLMParams lmParams_; + + LevenbergMarquardtOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedLMParams& params, double error, int iterations) : + NonlinearOptimizer(graph, values, params, error, iterations), lmParams_(params) {} + + LevenbergMarquardtOptimizer(const LevenbergMarquardtOptimizer& original, const SharedGraph& newGraph, + const SharedValues& newValues, const SharedLMParams& newParams) : + NonlinearOptimizer(original, newGraph, newValues, newParams), + lmParams_(newParams ? 
newParams : original.lmParams_) {} + +}; + +} diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 7981146d7..dba87d6fb 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -11,15 +11,14 @@ /** * @file NonlinearOptimizer.h - * @brief Encapsulates nonlinear optimization state - * @author Frank Dellaert + * @brief Base class and parameters for nonlinear optimization algorithms + * @author Richard Roberts * @date Sep 7, 2009 */ #pragma once #include -#include namespace gtsam { @@ -28,8 +27,7 @@ namespace gtsam { */ class NonlinearOptimizerParams { public: - - /** Control the printing verbosity */ + /** See NonlinearOptimizationParams::verbosity */ enum Verbosity { SILENT, ERROR, @@ -53,14 +51,16 @@ public: maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(0.0), errorTol(0.0), verbosity(SILENT) {} - void print(const std::string& str = "") const { - cout << s << "\n"; - cout << "relative decrease threshold: " << relativeErrorTol << "\n"; - cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; - cout << " total error threshold: " << errorTol << "\n"; - cout << " maximum iterations: " << maxIterations << "\n"; - cout << " verbosity level: " << verbosity << "\n"; + virtual void print(const std::string& str = "") const { + std::cout << s << "\n"; + std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; + std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; + std::cout << " total error threshold: " << errorTol << "\n"; + std::cout << " maximum iterations: " << maxIterations << "\n"; + std::cout << " verbosity level: " << verbosity << std::endl; } + + virtual ~NonlinearOptimizationParams() {} }; @@ -80,7 +80,7 @@ public: * * Example: * \code -NonlinearOptimizer::shared_ptr optimizer = DoglegOptimizer::Create(graph, initialValues); +NonlinearOptimizer::auto_ptr optimizer = DoglegOptimizer::Create(graph, initialValues); optimizer = optimizer->optimizer(); Values result = optimizer->values(); useTheResult(result); @@ -123,26 +123,6 @@ public: /** A const shared_ptr to the parameters */ typedef boost::shared_ptr SharedParams; -protected: - - const SharedGraph graph_; - const SharedValues values_; - const SharedParams params_; - const double error_; - const int iterations_; - - NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedParams& params) : - graph_(graph), values_(values), params_(params), - error_(graph_->error(*values_)), iterations_(0) {} - - NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedParams& params, double error, int iterations) : - graph_(graph), values_(values), params_(params), - error_(error), iterations_(iterations) {} - -public: - /// @name Standard interface /// @{ @@ -169,29 +149,24 @@ public: /// @name Advanced interface /// @{ + /** Virtual destructor */ + virtual ~NonlinearOptimizer() {} + /** Perform a single iteration, returning a new NonlinearOptimizer class * containing the updated variable assignments, which may be retrieved with * values(). */ virtual auto_ptr iterate() const = 0; - /** Update the nonlinear factor graph, leaving all other state the same. - * Returns a new updated NonlinearOptimzier object, the original is not - * modified. + /** Update the graph, values, and/or parameters, leaving all other state + * the same. Any of these that are empty shared pointers are left unchanged + * in the returned optimizer object. 
Returns a new updated + * NonlinearOptimzier object, the original is not modified. */ - virtual auto_ptr update(const SharedGraph& newGraph) const = 0; - - /** Update the variable assignments, leaving all other state the same. - * Returns a new updated NonlinearOptimzier object, the original is not - * modified. - */ - virtual auto_ptr update(const SharedValues& newValues) const = 0; - - /** Update the parameters, leaving all other state the same. - * Returns a new updated NonlinearOptimzier object, the original is not - * modified. - */ - virtual auto_ptr update(const SharedParams& newParams) const = 0; + virtual auto_ptr update( + const SharedGraph& newGraph = SharedGraph(), + const SharedValues& newValues = SharedValues(), + const SharedParams& newParams = SharedParams()) const = 0; /** Create a copy of the NonlinearOptimizer */ virtual auto_ptr clone() const = 0; @@ -205,6 +180,37 @@ protected: */ auto_ptr defaultOptimize() const; +protected: + + const SharedGraph graph_; + const SharedValues values_; + const SharedParams params_; + const double error_; + const int iterations_; + + /** Constructor for initial construction of base classes, computes error and + * sets iterations to zero. + */ + NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedParams& params) : + graph_(graph), values_(values), params_(params), + error_(graph_->error(*values_)), iterations_(0) {} + + /** Constructor that specifies all parts of the state, used for updates */ + NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, + const SharedParams& params, double error, int iterations) : + graph_(graph), values_(values), params_(params), + error_(error), iterations_(iterations) {} + + /** Convenience constructor for modifying only some of the state. */ + NonlinearOptimizer(const NonlinearOptimizer& original, const SharedGraph& newGraph, + const SharedValues& newValues, const SharedParams& newParams) : + graph_(newGraph ? newGraph : original.graph_), + values_(newValues ? newValues : original.values_), + params_(newParams ? newParams : original.params_), + error_(newGraph || newValues ? 
graph_->error(*values_) : original.error_), + iterations_(original.iterations_) {} + }; /** Check whether the relative error decrease is less than relativeErrorTreshold, diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index e397ea443..91d92b530 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -35,7 +35,6 @@ using namespace boost; // template definitions #include #include -#include using namespace gtsam; @@ -235,6 +234,10 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared ) /* ************************************************************************* */ TEST( NonlinearOptimizer, optimization_method ) { + GaussNewtonParams params; + params.elimination = GaussNewtonParams::QR; + EXPECT(false); + example::Graph fg = example::createReallyNonlinearFactorGraph(); Point2 x0(3,3); From 67564d043bdf189a16f5dfd4579c9ee2d3635c08 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 28 Feb 2012 01:17:42 +0000 Subject: [PATCH 03/57] LevenbergMarquardtOptimizer in progress --- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 89 +++++++++++++++++++ gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 6 ++ 2 files changed, 95 insertions(+) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 270778afe..c599b1830 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -47,6 +47,95 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); + + const NonlinearOptimizerParams::Verbosity verbosity = params_->verbosity; + const double lambdaFactor = parameters_->lambdaFactor_ ; + double lambda = params_->lambda; + + double next_error = error_; + SharedValues next_values = values_; + + // Keep increasing lambda until we make make progress + while(true) { + if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl; + + // add prior-factors + // TODO: replace this dampening with a backsubstitution approach + typename L::shared_ptr dampedSystem(new L(linearSystem)); + { + double sigma = 1.0 / sqrt(lambda); + dampedSystem->reserve(dampedSystem->size() + dimensions_->size()); + // for each of the variables, add a prior + for(Index j=0; jsize(); ++j) { + size_t dim = (*dimensions_)[j]; + Matrix A = eye(dim); + Vector b = zero(dim); + SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma); + typename L::sharedFactor prior(new JacobianFactor(j, A, b, model)); + dampedSystem->push_back(prior); + } + } + if (verbosity >= Parameters::DAMPED) dampedSystem->print("damped"); + + // Create a new solver using the damped linear system + // FIXME: remove spcg specific code + if (spcg_solver_) spcg_solver_->replaceFactors(dampedSystem); + shared_solver solver = (spcg_solver_) ? 
spcg_solver_ : shared_solver( + new S(dampedSystem, structure_, parameters_->useQR_)); + + // Try solving + try { + VectorValues delta = *solver->optimize(); + if (verbosity >= Parameters::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl; + if (verbosity >= Parameters::TRYDELTA) delta.print("delta"); + + // update values + shared_values newValues(new Values(values_->retract(delta, *ordering_))); + + // create new optimization state with more adventurous lambda + double error = graph_->error(*newValues); + + if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl; + + if( error <= error_ ) { + next_values = newValues; + next_error = error; + lambda /= lambdaFactor; + break; + } + else { + // Either we're not cautious, or the same lambda was worse than the current error. + // The more adventurous lambda was worse too, so make lambda more conservative + // and keep the same values. + if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { + if(verbosity >= Parameters::ERROR) + cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; + break; + } else { + lambda *= factor; + } + } + } catch(const NegativeMatrixException& e) { + if(verbosity >= Parameters::LAMBDA) + cout << "Negative matrix, increasing lambda" << endl; + // Either we're not cautious, or the same lambda was worse than the current error. + // The more adventurous lambda was worse too, so make lambda more conservative + // and keep the same values. + if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { + if(verbosity >= Parameters::ERROR) + cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; + break; + } else { + lambda *= factor; + } + } catch(...) 
{ + throw; + } + } // end while + + return newValuesErrorLambda_(next_values, next_error, lambda); + + // Maybe show output if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 83c4ebe88..c1535017d 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -39,11 +39,17 @@ public: QR, }; + /** See LevenbergMarquardtParams::lmVerbosity */ + enum LMVerbosity { + + }; + Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) Factorization factorization; ///< The numerical factorization (default: LDL) Ordering::shared_ptr ordering; ///< The variable elimination ordering (default: empty -> COLAMD) double lambda; ///< The initial (and current after each iteration) Levenberg-Marquardt damping term (default: 1e-5) double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) + double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) LevenbergMarquardtParams() : elimination(MULTIFRONTAL), factorization(LDL), lambda(1e-5), lambdaFactor(10.0) {} From 8748f483b08081563b7766bcc1bb25cb7c83a76a Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 28 Feb 2012 05:30:53 +0000 Subject: [PATCH 04/57] NonlinearOptimizer, GaussNewtonOptimizer, and LevenbergMarquardt Optimizer compile --- .cproject | 2 +- .project | 2 +- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 16 +-- gtsam/nonlinear/GaussNewtonOptimizer.h | 75 +++++++--- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 105 ++++++-------- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 135 ++++++++++++++---- gtsam/nonlinear/NonlinearOptimizer.cpp | 4 +- gtsam/nonlinear/NonlinearOptimizer.h | 15 +- 8 files changed, 231 insertions(+), 123 deletions(-) diff --git a/.cproject b/.cproject index e85ec02ec..108a70799 100644 --- a/.cproject +++ b/.cproject @@ -21,7 +21,7 @@ - + diff --git a/.project b/.project index 9856df2ea..c4d531b04 100644 --- a/.project +++ b/.project @@ -23,7 +23,7 @@ org.eclipse.cdt.make.core.buildArguments - -j5 + org.eclipse.cdt.make.core.buildCommand diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index a372fae4e..a603131ad 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -27,10 +27,10 @@ namespace gtsam { NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { // Linearize graph - GaussianFactorGraph::shared_ptr linear = graph_->linearize(values_, gnParams_->ordering); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); // Check whether to use QR - const bool useQR; + bool useQR; if(gnParams_->factorization == GaussNewtonParams::LDL) useQR = false; else if(gnParams_->factorization == GaussNewtonParams::QR) @@ -40,9 +40,9 @@ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { // Optimize VectorValues::shared_ptr delta; - if(gnParams_->elimination == MULTIFRONTAL) + if(gnParams_->elimination == GaussNewtonParams::MULTIFRONTAL) delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); - else if(gnParams_->elimination == SEQUENTIAL) + else if(gnParams_->elimination == GaussNewtonParams::SEQUENTIAL) delta = GaussianSequentialSolver(*linear, useQR).optimize(); else throw runtime_error("Optimization parameter is invalid: 
GaussNewtonParams::elimination"); @@ -51,16 +51,16 @@ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); // Update values - SharedValues newValues(new Values(values_->retract(*delta, gnParams_->ordering))); - double newError = graph_->error(newValues); + SharedValues newValues(new Values(values_->retract(*delta, *ordering_))); + double newError = graph_->error(*newValues); // Maybe show output if (params_->verbosity >= NonlinearOptimizerParams::VALUES) newValues->print("newValues"); if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << newError << endl; // Create new optimizer with new values and new error - auto_ptr newOptimizer(new GaussNewtonOptimizer( - graph_, newValues, gnParams_, newError, iterations_+1)); + NonlinearOptimizer::auto_ptr newOptimizer(new GaussNewtonOptimizer( + *this, newValues, newError)); return newOptimizer; } diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index df5a2f88f..e4b4c31bb 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -41,11 +41,12 @@ public: Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) Factorization factorization; ///< The numerical factorization (default: LDL) - Ordering ordering; ///< The variable elimination ordering (default: empty -> COLAMD) GaussNewtonParams() : elimination(MULTIFRONTAL), factorization(LDL) {} + ~GaussNewtonParams() {} + virtual void print(const std::string& str = "") const { NonlinearOptimizerParams::print(str); if(elimination == MULTIFRONTAL) @@ -59,8 +60,6 @@ public: std::cout << " factorization method: LDL\n"; else if(factorization == QR) std::cout << " factorization method: QR\n"; - else if(factorization == CHOLESKY) - std::cout << " factorization method: CHOLESKY\n"; else std::cout << " factorization method: (invalid)\n"; @@ -75,7 +74,8 @@ class GaussNewtonOptimizer : public NonlinearOptimizer { public: - typedef boost::shared_ptr SharedGNParams; + typedef boost::shared_ptr SharedGNParams; + typedef boost::shared_ptr SharedOrdering; /// @name Standard interface /// @{ @@ -89,12 +89,16 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, - const GaussNewtonParams& params) : + const GaussNewtonParams& params = GaussNewtonParams(), + const Ordering& ordering = Ordering()) : NonlinearOptimizer( SharedGraph(new NonlinearFactorGraph(graph)), SharedValues(new Values(values)), SharedGNParams(new GaussNewtonParams(params))), - gnParams_(boost::static_pointer_cast(params_)) {} + gnParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(ordering.size() == 0), + ordering_(colamdOrdering_ ? + graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -103,8 +107,12 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedGNParams& params) : - NonlinearOptimizer(graph, values, params), gnParams_(params) {} + const SharedGNParams& params = SharedGNParams(), + const SharedOrdering& ordering = SharedOrdering()) : + NonlinearOptimizer(graph, values, params ? 
params : SharedGNParams(new GaussNewtonParams())), + gnParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(!ordering || ordering->size() == 0), + ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering) {} /// @} @@ -126,16 +134,24 @@ public: * NonlinearOptimzier object, the original is not modified. */ virtual auto_ptr update( - const SharedGraph& newGraph = SharedGraph(), + const SharedGraph& newGraph, const SharedValues& newValues = SharedValues(), const SharedParams& newParams = SharedParams()) const { - return new GaussNewtonOptimizer(*this, newGraph, newValues, - boost::dynamic_pointer_cast(newParams)); + return auto_ptr(new GaussNewtonOptimizer(*this, newGraph, newValues, + boost::dynamic_pointer_cast(newParams))); } + /** Update the ordering, leaving all other state the same. If newOrdering + * is an empty pointer or contains an empty Ordering object + * (with zero size), a COLAMD ordering will be computed. Returns a new + * NonlinearOptimizer object, the original is not modified. + */ + virtual auto_ptr update(const SharedOrdering& newOrdering) const { + return auto_ptr(new GaussNewtonOptimizer(*this, newOrdering)); } + /** Create a copy of the NonlinearOptimizer */ virtual auto_ptr clone() const { - return new GaussNewtonOptimizer(*this); + return auto_ptr(new GaussNewtonOptimizer(*this)); } /// @} @@ -143,16 +159,41 @@ public: protected: const SharedGNParams gnParams_; + const bool colamdOrdering_; + const SharedOrdering ordering_; - GaussNewtonOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedGNParams& params, double error, int iterations) : - NonlinearOptimizer(graph, values, params, error, iterations), gnParams_(params) {} - + /** Protected constructor called by update() to modify the graph, values, or + * parameters. Computes a COLAMD ordering if the optimizer was originally + * constructed with an empty ordering, and if the graph is changing. + */ GaussNewtonOptimizer(const GaussNewtonOptimizer& original, const SharedGraph& newGraph, const SharedValues& newValues, const SharedGNParams& newParams) : NonlinearOptimizer(original, newGraph, newValues, newParams), - gnParams_(newParams ? newParams : original.gnParams_) {} + gnParams_(newParams ? newParams : original.gnParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(newGraph && colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : original.ordering_) {} + /** Protected constructor called by update() to modify the ordering, computing + * a COLAMD ordering if the new ordering is empty, and also recomputing the + * dimensions. + */ + GaussNewtonOptimizer( + const GaussNewtonOptimizer& original, const SharedOrdering& newOrdering) : + NonlinearOptimizer(original), + gnParams_(original.gnParams_), + colamdOrdering_(!newOrdering || newOrdering->size() == 0), + ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : newOrdering) {} + +private: + + // Special constructor for completing an iteration, updates the values and + // error, and increments the iteration count. 
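
Read together with the NonlinearOptimizer base interface introduced earlier in this series, the class above is meant to be driven in a create-then-optimize style. A minimal usage sketch of that pattern, using only the constructors and parameter fields declared in this header plus the optimize()/values() calls exercised by the updated unit tests later in the series; the helper function name and the particular parameter values are illustrative assumptions, not part of the patch:

#include <gtsam/nonlinear/GaussNewtonOptimizer.h>

using namespace gtsam;

// Hypothetical helper (not in the patch): run Gauss-Newton to convergence.
// Leaving the ordering argument at its default (an empty Ordering) makes the
// optimizer compute a COLAMD ordering from the graph, as documented above.
Values solveGaussNewton(const NonlinearFactorGraph& graph, const Values& initialEstimate)
{
  GaussNewtonParams params;                       // defaults as listed above
  params.relativeErrorTol = 1e-5;                 // inherited from NonlinearOptimizerParams
  params.factorization = GaussNewtonParams::QR;   // default is LDL

  GaussNewtonOptimizer optimizer(graph, initialEstimate, params);

  // optimize() returns a new optimizer object holding the converged state;
  // the original optimizer is not modified.
  NonlinearOptimizer::auto_ptr result = optimizer.optimize();
  return *result->values();
}
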
+ GaussNewtonOptimizer(const GaussNewtonOptimizer& original, + const SharedValues& newValues, double newError) : + NonlinearOptimizer(graph_, newValues, params_, newError, iterations_+1), + gnParams_(original.gnParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(original.ordering_) {} }; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index c599b1830..dd84bff1c 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -17,6 +17,8 @@ */ #include + +#include // For NegativeMatrixException #include #include @@ -27,75 +29,72 @@ namespace gtsam { NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { // Linearize graph - GaussianFactorGraph::shared_ptr linear = graph_->linearize(values_, gnParams_->ordering); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); // Check whether to use QR - const bool useQR; - if(gnParams_->factorization == LevenbergMarquardtParams::LDL) + bool useQR; + if(lmParams_->factorization == LevenbergMarquardtParams::LDL) useQR = false; - else if(gnParams_->factorization == LevenbergMarquardtParams::QR) + else if(lmParams_->factorization == LevenbergMarquardtParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization"); - // Optimize - VectorValues::shared_ptr delta; - if(gnParams_->elimination == MULTIFRONTAL) - delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); - else if(gnParams_->elimination == SEQUENTIAL) - delta = GaussianSequentialSolver(*linear, useQR).optimize(); - else - throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); + // Pull out parameters we'll use + const NonlinearOptimizerParams::Verbosity nloVerbosity = params_->verbosity; + const LevenbergMarquardtParams::LMVerbosity lmVerbosity = lmParams_->lmVerbosity; + const double lambdaFactor = lmParams_->lambdaFactor; - - const NonlinearOptimizerParams::Verbosity verbosity = params_->verbosity; - const double lambdaFactor = parameters_->lambdaFactor_ ; - double lambda = params_->lambda; - - double next_error = error_; - SharedValues next_values = values_; + // Variables to update during try_lambda loop + double lambda = lmParams_->lambdaInitial; + double next_error = error(); + SharedValues next_values = values(); // Keep increasing lambda until we make make progress while(true) { - if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl; + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) + cout << "trying lambda = " << lambda << endl; - // add prior-factors + // Add prior-factors // TODO: replace this dampening with a backsubstitution approach - typename L::shared_ptr dampedSystem(new L(linearSystem)); + GaussianFactorGraph dampedSystem(*linear); { double sigma = 1.0 / sqrt(lambda); - dampedSystem->reserve(dampedSystem->size() + dimensions_->size()); + dampedSystem.reserve(dampedSystem.size() + dimensions_->size()); // for each of the variables, add a prior for(Index j=0; jsize(); ++j) { size_t dim = (*dimensions_)[j]; Matrix A = eye(dim); Vector b = zero(dim); SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma); - typename L::sharedFactor prior(new JacobianFactor(j, A, b, model)); - dampedSystem->push_back(prior); + GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model)); + dampedSystem.push_back(prior); } } - if (verbosity >= 
Parameters::DAMPED) dampedSystem->print("damped"); - - // Create a new solver using the damped linear system - // FIXME: remove spcg specific code - if (spcg_solver_) spcg_solver_->replaceFactors(dampedSystem); - shared_solver solver = (spcg_solver_) ? spcg_solver_ : shared_solver( - new S(dampedSystem, structure_, parameters_->useQR_)); + if (lmVerbosity >= LevenbergMarquardtParams::DAMPED) dampedSystem.print("damped"); // Try solving try { - VectorValues delta = *solver->optimize(); - if (verbosity >= Parameters::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl; - if (verbosity >= Parameters::TRYDELTA) delta.print("delta"); + + // Optimize + VectorValues::shared_ptr delta; + if(lmParams_->elimination == LevenbergMarquardtParams::MULTIFRONTAL) + delta = GaussianMultifrontalSolver(dampedSystem, useQR).optimize(); + else if(lmParams_->elimination == LevenbergMarquardtParams::SEQUENTIAL) + delta = GaussianSequentialSolver(dampedSystem, useQR).optimize(); + else + throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); + + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta->vector().norm() << endl; + if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta"); // update values - shared_values newValues(new Values(values_->retract(delta, *ordering_))); + SharedValues newValues(new Values(values_->retract(*delta, *ordering_))); // create new optimization state with more adventurous lambda double error = graph_->error(*newValues); - if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl; + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl; if( error <= error_ ) { next_values = newValues; @@ -107,49 +106,39 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. - if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { - if(verbosity >= Parameters::ERROR) + if(lambda >= lmParams_->lambdaUpperBound) { + if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; } else { - lambda *= factor; + lambda *= lambdaFactor; } } } catch(const NegativeMatrixException& e) { - if(verbosity >= Parameters::LAMBDA) + if(lmVerbosity >= LevenbergMarquardtParams::LAMBDA) cout << "Negative matrix, increasing lambda" << endl; // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. - if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { - if(verbosity >= Parameters::ERROR) + if(lambda >= lmParams_->lambdaUpperBound) { + if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; } else { - lambda *= factor; + lambda *= lambdaFactor; } } catch(...) 
{ throw; } } // end while - return newValuesErrorLambda_(next_values, next_error, lambda); - - // Maybe show output - if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); - - // Update values - SharedValues newValues(new Values(values_->retract(*delta, gnParams_->ordering))); - double newError = graph_->error(newValues); - - // Maybe show output - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) newValues->print("newValues"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << newError << endl; + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) next_values->print("newValues"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << next_error << endl; // Create new optimizer with new values and new error - auto_ptr newOptimizer(new LevenbergMarquardtOptimizer( - graph_, newValues, gnParams_, newError, iterations_+1)); + NonlinearOptimizer::auto_ptr newOptimizer(new LevenbergMarquardtOptimizer( + *this, next_values, next_error, lambda)); return newOptimizer; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index c1535017d..20abf1fb7 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -22,8 +22,10 @@ namespace gtsam { -/** Parameters for Levenberg-Marquardt optimization, inherits from - * NonlinearOptimizationParams. +/** Parameters for Levenberg-Marquardt optimization. Note that this parameters + * class inherits from NonlinearOptimizerParams, which specifies the parameters + * common to all nonlinear optimization algorithms. This class also contains + * all of those parameters. */ class LevenbergMarquardtParams : public NonlinearOptimizerParams { public: @@ -41,18 +43,26 @@ public: /** See LevenbergMarquardtParams::lmVerbosity */ enum LMVerbosity { - + SILENT, + LAMBDA, + TRYLAMBDA, + TRYCONFIG, + TRYDELTA, + DAMPED }; Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) Factorization factorization; ///< The numerical factorization (default: LDL) - Ordering::shared_ptr ordering; ///< The variable elimination ordering (default: empty -> COLAMD) - double lambda; ///< The initial (and current after each iteration) Levenberg-Marquardt damping term (default: 1e-5) + double lambdaInitial; ///< The initial (and current after each iteration) Levenberg-Marquardt damping term (default: 1e-5) double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) + LMVerbosity lmVerbosity; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity LevenbergMarquardtParams() : - elimination(MULTIFRONTAL), factorization(LDL), lambda(1e-5), lambdaFactor(10.0) {} + elimination(MULTIFRONTAL), factorization(LDL), lambdaInitial(1e-5), + lambdaFactor(10.0), lambdaUpperBound(1e5), lmVerbosity(SILENT) {} + + virtual ~LevenbergMarquardtParams() {} virtual void print(const std::string& str = "") const { NonlinearOptimizerParams::print(str); @@ -67,13 +77,12 @@ public: std::cout << " factorization method: LDL\n"; else if(factorization == QR) std::cout << " factorization method: QR\n"; - else if(factorization == CHOLESKY) - std::cout << " factorization method: CHOLESKY\n"; else std::cout << " factorization method: (invalid)\n"; - std::cout << " 
lambda: " << lambda << "\n"; + std::cout << " lambdaInitial: " << lambdaInitial << "\n"; std::cout << " lambdaFactor: " << lambdaFactor << "\n"; + std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n"; std::cout.flush(); } @@ -86,7 +95,8 @@ class LevenbergMarquardtOptimizer : public NonlinearOptimizer { public: - typedef boost::shared_ptr SharedLMParams; + typedef boost::shared_ptr SharedLMParams; + typedef boost::shared_ptr SharedOrdering; /// @name Standard interface /// @{ @@ -100,12 +110,18 @@ public: * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& values, - const LevenbergMarquardtParams& params) : + const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), + const Ordering& ordering = Ordering()) : NonlinearOptimizer( SharedGraph(new NonlinearFactorGraph(graph)), SharedValues(new Values(values)), SharedLMParams(new LevenbergMarquardtParams(params))), - lmParams_(boost::static_pointer_cast(params_)) {} + lmParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(ordering.size() == 0), + ordering_(colamdOrdering_ ? + graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), + dimensions_(new vector(values_->dims(*ordering_))), + lambda_(lmParams_->lambdaInitial) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -114,8 +130,14 @@ public: * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedLMParams& params) : - NonlinearOptimizer(graph, values, params), lmParams_(params) {} + const SharedLMParams& params = SharedLMParams(), + const SharedOrdering& ordering = SharedOrdering()) : + NonlinearOptimizer(graph, values, params ? params : SharedLMParams(new LevenbergMarquardtParams())), + lmParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(!ordering || ordering->size() == 0), + ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering), + dimensions_(new vector(values_->dims(*ordering_))), + lambda_(lmParams_->lambdaInitial) {} /// @} @@ -133,37 +155,98 @@ public: /** Update the graph, values, and/or parameters, leaving all other state * the same. Any of these that are empty shared pointers are left unchanged - * in the returned optimizer object. Returns a new updated - * NonlinearOptimzier object, the original is not modified. + * in the returned optimizer object. Returns a new updated NonlinearOptimzier + * object, the original is not modified. */ virtual auto_ptr update( - const SharedGraph& newGraph = SharedGraph(), + const SharedGraph& newGraph, const SharedValues& newValues = SharedValues(), const SharedParams& newParams = SharedParams()) const { - return new LevenbergMarquardtOptimizer(*this, newGraph, newValues, - boost::dynamic_pointer_cast(newParams)); + return auto_ptr(new LevenbergMarquardtOptimizer(*this, newGraph, newValues, + boost::dynamic_pointer_cast(newParams))); } + /** Update the ordering, leaving all other state the same. If newOrdering + * is an empty pointer or contains an empty Ordering object + * (with zero size), a COLAMD ordering will be computed. Returns a new + * NonlinearOptimizer object, the original is not modified. + */ + virtual auto_ptr update(const SharedOrdering& newOrdering) const { + return auto_ptr(new LevenbergMarquardtOptimizer(*this, newOrdering)); } + + /** Update the damping factor lambda, leaving all other state the same. 
+ * Returns a new NonlinearOptimizer object, the original is not modified. + */ + virtual auto_ptr update(double newLambda) const { + return auto_ptr(new LevenbergMarquardtOptimizer(*this, newLambda)); } + /** Create a copy of the NonlinearOptimizer */ virtual auto_ptr clone() const { - return new LevenbergMarquardtOptimizer(*this); + return auto_ptr(new LevenbergMarquardtOptimizer(*this)); } /// @} protected: + typedef boost::shared_ptr > SharedDimensions; + const SharedLMParams lmParams_; + const bool colamdOrdering_; + const SharedOrdering ordering_; + const SharedDimensions dimensions_; + const double lambda_; - LevenbergMarquardtOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedLMParams& params, double error, int iterations) : - NonlinearOptimizer(graph, values, params, error, iterations), lmParams_(params) {} - + /** Protected constructor called by update() to modify the graph, values, or + * parameters. Computes a COLAMD ordering if the optimizer was originally + * constructed with an empty ordering, and if the graph is changing. + * Recomputes the dimensions if the values are changing. + */ LevenbergMarquardtOptimizer(const LevenbergMarquardtOptimizer& original, const SharedGraph& newGraph, const SharedValues& newValues, const SharedLMParams& newParams) : - NonlinearOptimizer(original, newGraph, newValues, newParams), - lmParams_(newParams ? newParams : original.lmParams_) {} + NonlinearOptimizer(original, newGraph, newValues, newParams), + lmParams_(newParams ? newParams : original.lmParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(newGraph && colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : original.ordering_), + dimensions_(newValues ? + SharedDimensions(new std::vector(values_->dims(*ordering_))) : original.dimensions_), + lambda_(original.lambda_) {} + /** Protected constructor called by update() to modify the ordering, computing + * a COLAMD ordering if the new ordering is empty, and also recomputing the + * dimensions. + */ + LevenbergMarquardtOptimizer( + const LevenbergMarquardtOptimizer& original, const SharedOrdering& newOrdering) : + NonlinearOptimizer(original), + lmParams_(original.lmParams_), + colamdOrdering_(!newOrdering || newOrdering->size() == 0), + ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : newOrdering), + dimensions_(new std::vector(values_->dims(*ordering_))), + lambda_(original.lambda_) {} + + /** Protected constructor called by update() to modify lambda. */ + LevenbergMarquardtOptimizer( + const LevenbergMarquardtOptimizer& original, double newLambda) : + NonlinearOptimizer(original), + lmParams_(original.lmParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(original.ordering_), + dimensions_(original.dimensions_), + lambda_(newLambda) {} + +private: + + // Special constructor for completing an iteration, updates the values and + // error, and increments the iteration count. 
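
One detail worth spelling out from iterate() earlier in this commit: the damped system is built by adding, for every variable j of dimension d_j, a prior factor with unit Jacobian, zero right-hand side, and isotropic sigma = 1/sqrt(lambda). A quick check that this reproduces the usual Levenberg-Marquardt damping term (standard LM algebra, not something the patch states explicitly):

  \min_{\delta}\; \|A\delta - b\|^2
    + \sum_j \Big\| \tfrac{1}{\sigma}\,(I\,\delta_j - 0) \Big\|^2
    \;=\; \|A\delta - b\|^2 + \lambda \|\delta\|^2,
  \qquad \sigma = 1/\sqrt{\lambda}.

Each rejected try multiplies lambda by lambdaFactor (heavier damping, shorter steps), each accepted step divides it by lambdaFactor, and the loop gives up with a warning once lambda has reached lambdaUpperBound and the step still fails to reduce the error.
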
+ LevenbergMarquardtOptimizer(const LevenbergMarquardtOptimizer& original, + const SharedValues& newValues, double newError, double newLambda) : + NonlinearOptimizer(graph_, newValues, params_, newError, iterations_+1), + lmParams_(original.lmParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(original.ordering_), + dimensions_(original.dimensions_), + lambda_(newLambda) {} }; } diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index e0c10bfe9..ed5542df3 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -25,14 +25,14 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -auto_ptr NonlinearOptimizer::defaultOptimize() const { +NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const { double currentError = this->error(); // check if we're already close enough if(currentError <= params_->errorTol) { if (params_->verbosity >= NonlinearOptimizerParams::ERROR) - cout << "Exiting, as error = " << currentError << " < " << errorTol << endl; + cout << "Exiting, as error = " << currentError << " < " << params_->errorTol << endl; return this->clone(); } diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index dba87d6fb..dd81ced31 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -27,18 +27,13 @@ namespace gtsam { */ class NonlinearOptimizerParams { public: - /** See NonlinearOptimizationParams::verbosity */ + /** See NonlinearOptimizerParams::verbosity */ enum Verbosity { SILENT, ERROR, - LAMBDA, - TRYLAMBDA, VALUES, DELTA, - TRYCONFIG, - TRYDELTA, - LINEAR, - DAMPED + LINEAR }; int maxIterations; ///< The maximum iterations to stop iterating (default 100) @@ -52,7 +47,7 @@ public: errorTol(0.0), verbosity(SILENT) {} virtual void print(const std::string& str = "") const { - std::cout << s << "\n"; + std::cout << str << "\n"; std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; std::cout << " total error threshold: " << errorTol << "\n"; @@ -60,7 +55,7 @@ public: std::cout << " verbosity level: " << verbosity << std::endl; } - virtual ~NonlinearOptimizationParams() {} + virtual ~NonlinearOptimizerParams() {} }; @@ -112,7 +107,7 @@ class NonlinearOptimizer { public: /** An auto pointer to this class */ - typedef std::auto_ptr auto_ptr; + typedef std::auto_ptr auto_ptr; /** A const shared_ptr to a NonlinearFactorGraph */ typedef boost::shared_ptr SharedGraph; From 9312b0a1285854bc6ff853e027454ec3ae078bff Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 28 Feb 2012 20:55:50 +0000 Subject: [PATCH 05/57] Updating unit tests for new NonlinearOptimizer --- .cproject | 3 +- .project | 2 +- gtsam/nonlinear/GaussNewtonOptimizer.h | 5 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 5 +- gtsam/slam/planarSLAM.cpp | 2 - gtsam/slam/planarSLAM.h | 9 +--- gtsam/slam/pose2SLAM.cpp | 2 - gtsam/slam/pose2SLAM.h | 14 +----- gtsam/slam/pose3SLAM.h | 3 -- gtsam/slam/tests/testGeneralSFMFactor.cpp | 48 +++++++------------ .../testGeneralSFMFactor_Cal3Bundler.cpp | 47 +++++++----------- gtsam/slam/tests/testPose2SLAM.cpp | 25 +++++----- gtsam/slam/tests/testPose3SLAM.cpp | 11 ++--- gtsam/slam/tests/testStereoFactor.cpp | 37 +++++--------- gtsam/slam/tests/testVSLAM.cpp | 34 +++++++------ gtsam/slam/visualSLAM.cpp | 1 - gtsam/slam/visualSLAM.h | 3 -- 
tests/testBoundingConstraint.cpp | 37 +++++++------- 18 files changed, 107 insertions(+), 181 deletions(-) diff --git a/.cproject b/.cproject index 108a70799..243637d65 100644 --- a/.cproject +++ b/.cproject @@ -21,7 +21,7 @@ - + @@ -2186,4 +2186,5 @@ + diff --git a/.project b/.project index c4d531b04..9856df2ea 100644 --- a/.project +++ b/.project @@ -23,7 +23,7 @@ org.eclipse.cdt.make.core.buildArguments - + -j5 org.eclipse.cdt.make.core.buildCommand diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index e4b4c31bb..3b1e69481 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -69,6 +69,7 @@ public: /** * This class performs Gauss-Newton nonlinear optimization + * TODO: use make_shared */ class GaussNewtonOptimizer : public NonlinearOptimizer { @@ -107,9 +108,9 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedGNParams& params = SharedGNParams(), + const GaussNewtonParams& params = GaussNewtonParams(), const SharedOrdering& ordering = SharedOrdering()) : - NonlinearOptimizer(graph, values, params ? params : SharedGNParams(new GaussNewtonParams())), + NonlinearOptimizer(graph, values, SharedGNParams(new GaussNewtonParams(params))), gnParams_(boost::static_pointer_cast(params_)), colamdOrdering_(!ordering || ordering->size() == 0), ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering) {} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 20abf1fb7..b2605b968 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -90,6 +90,7 @@ public: /** * This class performs Levenberg-Marquardt nonlinear optimization + * TODO: use make_shared */ class LevenbergMarquardtOptimizer : public NonlinearOptimizer { @@ -130,9 +131,9 @@ public: * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedLMParams& params = SharedLMParams(), + const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), const SharedOrdering& ordering = SharedOrdering()) : - NonlinearOptimizer(graph, values, params ? params : SharedLMParams(new LevenbergMarquardtParams())), + NonlinearOptimizer(graph, values, SharedLMParams(new LevenbergMarquardtParams(params))), lmParams_(boost::static_pointer_cast(params_)), colamdOrdering_(!ordering || ordering->size() == 0), ordering_(colamdOrdering_ ? 
graph_->orderingCOLAMD(*values_) : ordering), diff --git a/gtsam/slam/planarSLAM.cpp b/gtsam/slam/planarSLAM.cpp index db3d6b146..5e38acbd2 100644 --- a/gtsam/slam/planarSLAM.cpp +++ b/gtsam/slam/planarSLAM.cpp @@ -17,8 +17,6 @@ #include #include -#include -#include // Use planarSLAM namespace for specific SLAM instance namespace planarSLAM { diff --git a/gtsam/slam/planarSLAM.h b/gtsam/slam/planarSLAM.h index 33dc97a8c..bd7de3a91 100644 --- a/gtsam/slam/planarSLAM.h +++ b/gtsam/slam/planarSLAM.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -110,15 +110,10 @@ namespace planarSLAM { /// Optimize Values optimize(const Values& initialEstimate) { - typedef NonlinearOptimizer Optimizer; - return *Optimizer::optimizeLM(*this, initialEstimate, - NonlinearOptimizationParameters::LAMBDA); + return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimize()->values(); } }; - /// Optimizer - typedef NonlinearOptimizer Optimizer; - } // planarSLAM diff --git a/gtsam/slam/pose2SLAM.cpp b/gtsam/slam/pose2SLAM.cpp index b006c61f2..a500ed22e 100644 --- a/gtsam/slam/pose2SLAM.cpp +++ b/gtsam/slam/pose2SLAM.cpp @@ -21,8 +21,6 @@ // Use pose2SLAM namespace for specific SLAM instance - template class gtsam::NonlinearOptimizer; - namespace pose2SLAM { /* ************************************************************************* */ diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index 121e0c062..6e448b4ee 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -97,20 +97,10 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - typedef NonlinearOptimizer Optimizer; - return *Optimizer::optimizeLM(*this, initialEstimate, - NonlinearOptimizationParameters::LAMBDA); + return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimize()->values(); } }; - /// The sequential optimizer - typedef NonlinearOptimizer OptimizerSequential; - - /// The multifrontal optimizer - typedef NonlinearOptimizer Optimizer; - } // pose2SLAM diff --git a/gtsam/slam/pose3SLAM.h b/gtsam/slam/pose3SLAM.h index eb4eb9573..c394d800b 100644 --- a/gtsam/slam/pose3SLAM.h +++ b/gtsam/slam/pose3SLAM.h @@ -61,9 +61,6 @@ namespace gtsam { void addHardConstraint(Index i, const Pose3& p); }; - /// Optimizer - typedef NonlinearOptimizer Optimizer; - } // pose3SLAM /** diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index 7a8dc4825..5915fd4ff 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -16,7 +16,7 @@ using namespace boost; #include #include #include -#include +#include #include #include #include @@ -62,8 +62,6 @@ double getGaussian() return sqrt(-2.0f * (double)log(S) / S) * V1; } -typedef NonlinearOptimizer Optimizer; - const SharedNoiseModel sigma1(noiseModel::Unit::Create(1)); /* ************************************************************************* */ @@ -175,11 +173,9 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // Create an ordering of the variables shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * 1e-5 * nMeasurements); + 
NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -220,11 +216,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { const double reproj_error = 1e-5; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -263,12 +257,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { const double reproj_error = 1e-5 ; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -323,13 +314,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { const double reproj_error = 1e-5 ; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -366,12 +353,9 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { const double reproj_error = 1e-5 ; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-2, 1e-2, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index 
f762505a3..7f327b2fd 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -16,7 +16,7 @@ using namespace boost; #include #include #include -#include +#include #include #include #include @@ -63,8 +63,6 @@ double getGaussian() return sqrt(-2.0f * (double)log(S) / S) * V1; } -typedef NonlinearOptimizer Optimizer; - const SharedNoiseModel sigma1(noiseModel::Unit::Create(1)); /* ************************************************************************* */ @@ -177,11 +175,9 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // Create an ordering of the variables shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * 1e-5 * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -222,11 +218,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { const double reproj_error = 1e-5; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -265,12 +259,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { const double reproj_error = 1e-5 ; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -321,12 +312,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { const double reproj_error = 1e-5 ; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * 
reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -363,12 +351,9 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { const double reproj_error = 1e-5 ; shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::auto_ptr optimizer = + LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testPose2SLAM.cpp b/gtsam/slam/tests/testPose2SLAM.cpp index 81ce20334..1abc8e476 100644 --- a/gtsam/slam/tests/testPose2SLAM.cpp +++ b/gtsam/slam/tests/testPose2SLAM.cpp @@ -160,17 +160,16 @@ TEST(Pose2Graph, optimize) { // Choose an ordering and optimize shared_ptr ordering(new Ordering); *ordering += kx0, kx1; - typedef NonlinearOptimizer Optimizer; - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - Optimizer optimizer0(fg, initial, ordering, params); - Optimizer optimizer = optimizer0.levenbergMarquardt(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, initial, params, ordering).optimize(); // Check with expected config Values expected; expected.insert(pose2SLAM::PoseKey(0), Pose2(0,0,0)); expected.insert(pose2SLAM::PoseKey(1), Pose2(1,2,M_PI_2)); - CHECK(assert_equal(expected, *optimizer.values())); + CHECK(assert_equal(expected, *optimizer->values())); } /* ************************************************************************* */ @@ -200,11 +199,11 @@ TEST(Pose2Graph, optimizeThreePoses) { *ordering += kx0,kx1,kx2; // optimize - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - pose2SLAM::Optimizer optimizer0(fg, initial, ordering, params); - pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, initial, params, ordering).optimize(); - Values actual = *optimizer.values(); + Values actual = *optimizer->values(); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); @@ -243,11 +242,11 @@ TEST_UNSAFE(Pose2SLAM, optimizeCircle) { *ordering += kx0,kx1,kx2,kx3,kx4,kx5; // optimize - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - pose2SLAM::Optimizer optimizer0(fg, initial, ordering, params); - pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, initial, params, ordering).optimize(); - Values actual = *optimizer.values(); + Values actual = *optimizer->values(); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); diff --git a/gtsam/slam/tests/testPose3SLAM.cpp b/gtsam/slam/tests/testPose3SLAM.cpp index 
2edf6c4f0..5759b1562 100644 --- a/gtsam/slam/tests/testPose3SLAM.cpp +++ b/gtsam/slam/tests/testPose3SLAM.cpp @@ -30,6 +30,7 @@ using namespace boost::assign; #include #include +#include using namespace std; using namespace gtsam; @@ -76,11 +77,9 @@ TEST(Pose3Graph, optimizeCircle) { // Choose an ordering and optimize shared_ptr ordering(new Ordering); *ordering += kx0,kx1,kx2,kx3,kx4,kx5; - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - pose3SLAM::Optimizer optimizer0(fg, initial, ordering, params); - pose3SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(); - Values actual = *optimizer.values(); + Values actual = *LevenbergMarquardtOptimizer( + fg, initial, LevenbergMarquardtOptimizer::SharedLMParams(), ordering).optimize()->values(); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); @@ -115,7 +114,7 @@ TEST(Pose3Graph, partial_prior_height) { // linearization EXPECT_DOUBLES_EQUAL(2.0, height.error(values), tol); - Values actual = *pose3SLAM::Optimizer::optimizeLM(graph, values); + Values actual = *LevenbergMarquardtOptimizer(graph, values).optimize()->values(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } @@ -167,7 +166,7 @@ TEST(Pose3Graph, partial_prior_xy) { Values values; values.insert(key, init); - Values actual = *pose3SLAM::Optimizer::optimizeLM(graph, values); + Values actual = *LevenbergMarquardtOptimizer(graph, values).optimize()->values(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } diff --git a/gtsam/slam/tests/testStereoFactor.cpp b/gtsam/slam/tests/testStereoFactor.cpp index fc026fb35..d101708b3 100644 --- a/gtsam/slam/tests/testStereoFactor.cpp +++ b/gtsam/slam/tests/testStereoFactor.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -50,45 +50,34 @@ TEST( StereoFactor, singlePoint) { //Cal3_S2 K(625, 625, 0, 320, 240, 0.5); boost::shared_ptr K(new Cal3_S2Stereo(625, 625, 0, 320, 240, 0.5)); - boost::shared_ptr graph(new visualSLAM::Graph()); + NonlinearFactorGraph graph; - graph->add(visualSLAM::PoseConstraint(PoseKey(1),camera1)); + graph.add(visualSLAM::PoseConstraint(PoseKey(1),camera1)); StereoPoint2 z14(320,320.0-50, 240); // arguments: measurement, sigma, cam#, measurement #, K, baseline (m) - graph->add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K)); + graph.add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K)); // Create a configuration corresponding to the ground truth - boost::shared_ptr values(new Values()); - values->insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin + Values values(new Values()); + values.insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin Point3 l1(0, 0, 0); - values->insert(PointKey(1), l1); // add point at origin; + values.insert(PointKey(1), l1); // add point at origin; - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - - typedef gtsam::NonlinearOptimizer Optimizer; // optimization engine for this domain - - double absoluteThreshold = 1e-9; - double relativeThreshold = 1e-5; - int maxIterations = 100; - NonlinearOptimizationParameters::verbosityLevel verbose = NonlinearOptimizationParameters::SILENT; - NonlinearOptimizationParameters parameters(absoluteThreshold, relativeThreshold, 0, - maxIterations, 1.0, 10, verbose, NonlinearOptimizationParameters::BOUNDED); - - 
Optimizer optimizer(graph, values, ordering, make_shared(parameters)); + NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, values))); // We expect the initial to be zero because config is the ground truth - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed - Optimizer afterOneIteration = optimizer.iterate(); - DOUBLES_EQUAL(0.0, afterOneIteration.error(), 1e-9); + NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); + DOUBLES_EQUAL(0.0, afterOneIteration->error(), 1e-9); // Complete solution - Optimizer final = optimizer.gaussNewton(); + NonlinearOptimizer::auto_ptr final = optimizer->optimize(); - DOUBLES_EQUAL(0.0, final.error(), 1e-6); + DOUBLES_EQUAL(0.0, final->error(), 1e-6); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testVSLAM.cpp b/gtsam/slam/tests/testVSLAM.cpp index 0188daa96..3a80752c2 100644 --- a/gtsam/slam/tests/testVSLAM.cpp +++ b/gtsam/slam/tests/testVSLAM.cpp @@ -23,7 +23,7 @@ using namespace boost; #include -#include +#include #include using namespace std; @@ -102,16 +102,16 @@ TEST( Graph, optimizeLM) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering); - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, GaussNewtonParams(), ordering)); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - visualSLAM::Optimizer afterOneIteration = optimizer.iterate(); - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values()))); + CHECK(assert_equal(*initialEstimate,*(afterOneIteration->values()))); } @@ -139,16 +139,16 @@ TEST( Graph, optimizeLM2) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering); - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, GaussNewtonParams(), ordering)); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - visualSLAM::Optimizer afterOneIteration = optimizer.iterate(); - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values()))); + CHECK(assert_equal(*initialEstimate,*(afterOneIteration->values()))); } @@ -170,20 +170,18 @@ TEST( Graph, CHECK_ORDERING) initialEstimate->insert(PointKey(3), landmark3); initialEstimate->insert(PointKey(4), landmark4); - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initialEstimate); - // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering); - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + 
NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate)); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - visualSLAM::Optimizer afterOneIteration = optimizer.iterate(); - DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); + NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); + DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values()))); + CHECK(assert_equal(*initialEstimate,*(afterOneIteration->values()))); } /* ************************************************************************* */ diff --git a/gtsam/slam/visualSLAM.cpp b/gtsam/slam/visualSLAM.cpp index 67e77dc15..f6512e843 100644 --- a/gtsam/slam/visualSLAM.cpp +++ b/gtsam/slam/visualSLAM.cpp @@ -16,7 +16,6 @@ */ #include -#include #include namespace gtsam { diff --git a/gtsam/slam/visualSLAM.h b/gtsam/slam/visualSLAM.h index 5416e2142..d9be45eb3 100644 --- a/gtsam/slam/visualSLAM.h +++ b/gtsam/slam/visualSLAM.h @@ -145,7 +145,4 @@ namespace visualSLAM { }; // Graph - /// typedef for Optimizer. The current default will use the multi-frontal solver - typedef NonlinearOptimizer Optimizer; - } // namespaces diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index 09baec6e1..e553fb72b 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -31,11 +31,6 @@ SharedDiagonal soft_model2 = noiseModel::Unit::Create(2); SharedDiagonal soft_model2_alt = noiseModel::Isotropic::Sigma(2, 0.1); SharedDiagonal hard_model1 = noiseModel::Constrained::All(1); -typedef NonlinearFactorGraph Graph; -typedef boost::shared_ptr shared_graph; -typedef boost::shared_ptr shared_values; -typedef NonlinearOptimizer Optimizer; - // some simple inequality constraints Symbol key(simulated2D::PoseKey(1)); double mu = 10.0; @@ -150,19 +145,19 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { Point2 goal_pt(1.0, 2.0); Point2 start_pt(0.0, 1.0); - shared_graph graph(new Graph()); + NonlinearFactorGraph graph; Symbol x1('x',1); - graph->add(iq2D::PoseXInequality(x1, 1.0, true)); - graph->add(iq2D::PoseYInequality(x1, 2.0, true)); - graph->add(simulated2D::Prior(start_pt, soft_model2, x1)); + graph.add(iq2D::PoseXInequality(x1, 1.0, true)); + graph.add(iq2D::PoseYInequality(x1, 2.0, true)); + graph.add(simulated2D::Prior(start_pt, soft_model2, x1)); - shared_values initValues(new Values()); - initValues->insert(x1, start_pt); + Values initValues; + initValues.insert(x1, start_pt); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); Values expected; expected.insert(x1, goal_pt); - CHECK(assert_equal(expected, *actual, tol)); + CHECK(assert_equal(expected, actual, tol)); } /* ************************************************************************* */ @@ -172,19 +167,19 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { Point2 goal_pt(1.0, 2.0); Point2 start_pt(2.0, 3.0); - shared_graph graph(new Graph()); + NonlinearFactorGraph graph; Symbol x1('x',1); - graph->add(iq2D::PoseXInequality(x1, 1.0, false)); - graph->add(iq2D::PoseYInequality(x1, 2.0, false)); - graph->add(simulated2D::Prior(start_pt, soft_model2, x1)); + graph.add(iq2D::PoseXInequality(x1, 1.0, false)); + graph.add(iq2D::PoseYInequality(x1, 2.0, false)); + graph.add(simulated2D::Prior(start_pt, 
soft_model2, x1)); - shared_values initValues(new Values()); - initValues->insert(x1, start_pt); + Values initValues; + initValues.insert(x1, start_pt); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); Values expected; expected.insert(x1, goal_pt); - CHECK(assert_equal(expected, *actual, tol)); + CHECK(assert_equal(expected, actual, tol)); } /* ************************************************************************* */ From de2b9d6b31b1b75073ea3abb6b3e4c2db855b2bc Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 1 Mar 2012 16:07:16 +0000 Subject: [PATCH 06/57] Removed old NonlinearOptimizer-inl.h --- gtsam/nonlinear/NonlinearOptimizer-inl.h | 356 ----------------------- 1 file changed, 356 deletions(-) delete mode 100644 gtsam/nonlinear/NonlinearOptimizer-inl.h diff --git a/gtsam/nonlinear/NonlinearOptimizer-inl.h b/gtsam/nonlinear/NonlinearOptimizer-inl.h deleted file mode 100644 index f788eac2a..000000000 --- a/gtsam/nonlinear/NonlinearOptimizer-inl.h +++ /dev/null @@ -1,356 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizer-inl.h - * This is a template definition file, include it where needed (only!) - * so that the appropriate code is generated and link errors avoided. - * @brief: Encapsulates nonlinear optimization state - * @author Frank Dellaert - * @date Sep 7, 2009 - */ - -#pragma once - -#include -#include -#include -#include - -using namespace std; - -namespace gtsam { - - /* ************************************************************************* */ - template - NonlinearOptimizer::NonlinearOptimizer(shared_graph graph, - shared_values values, shared_ordering ordering, shared_parameters parameters) : - graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering), - parameters_(parameters), iterations_(0), - dimensions_(new vector(values->dims(*ordering))), - structure_(new VariableIndex(*graph->symbolic(*ordering))) { - if (!graph) throw std::invalid_argument( - "NonlinearOptimizer constructor: graph = NULL"); - if (!values) throw std::invalid_argument( - "NonlinearOptimizer constructor: values = NULL"); - if (!ordering) throw std::invalid_argument( - "NonlinearOptimizer constructor: ordering = NULL"); - } - - /* ************************************************************************* */ - // FIXME: remove this constructor - template - NonlinearOptimizer::NonlinearOptimizer(shared_graph graph, - shared_values values, shared_ordering ordering, - shared_solver spcg_solver, shared_parameters parameters) : - graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering), - parameters_(parameters), iterations_(0), - dimensions_(new vector(values->dims(*ordering))), - spcg_solver_(spcg_solver) { - if (!graph) throw std::invalid_argument( - "NonlinearOptimizer constructor: graph = NULL"); - if (!values) throw std::invalid_argument( - "NonlinearOptimizer constructor: values = NULL"); - if (!spcg_solver) throw std::invalid_argument( - "NonlinearOptimizer constructor: spcg_solver = NULL"); - } - - /* 
************************************************************************* */ - // One iteration of Gauss Newton - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::iterate() const { - - Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - - // FIXME: get rid of spcg solver - shared_solver solver; - if (spcg_solver_) { // special case for SPCG - spcg_solver_->replaceFactors(linearize()); - solver = spcg_solver_; - } else { // normal case - solver = createSolver(); - } - - VectorValues delta = *solver->optimize(); - - // maybe show output - if (verbosity >= Parameters::DELTA) delta.print("delta"); - - // take old values and update it - shared_values newValues(new Values(values_->retract(delta, *ordering_))); - - // maybe show output - if (verbosity >= Parameters::VALUES) newValues->print("newValues"); - - NonlinearOptimizer newOptimizer = newValues_(newValues); - ++ newOptimizer.iterations_; - - if (verbosity >= Parameters::ERROR) cout << "error: " << newOptimizer.error_ << endl; - return newOptimizer; - } - - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::gaussNewton() const { - static W writer(error_); - - if (error_ < parameters_->sumError_ ) { - if ( parameters_->verbosity_ >= Parameters::ERROR) - cout << "Exiting, as error = " << error_ - << " < sumError (" << parameters_->sumError_ << ")" << endl; - return *this; - } - - // linearize, solve, update - NonlinearOptimizer next = iterate(); - - writer.write(next.error_); - - // check convergence - bool converged = gtsam::check_convergence(*parameters_, error_, next.error_); - - // return converged state or iterate - if (converged) return next; - else return next.gaussNewton(); - } - - /* ************************************************************************* */ - // Iteratively try to do tempered Gauss-Newton steps until we succeed. - // Form damped system with given lambda, and return a new, more optimistic - // optimizer if error decreased or iterate with a larger lambda if not. - // TODO: in theory we can't infinitely recurse, but maybe we should put a max. 
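The routine introduced by the comment above forms the damped system not by scaling the Hessian directly, but by appending a unary prior on every variable with noise sigma = 1/sqrt(lambda); such a prior contributes lambda * I to that variable's block of the normal equations, which is exactly the Levenberg-Marquardt damping term. A standalone sketch of that trick, using the same calls (eye, zero, noiseModel::Isotropic::Sigma, JacobianFactor) that appear in the code below; the helper name dampSystem and the include paths are illustrative only.

    #include <cmath>
    #include <vector>
    #include <gtsam/linear/GaussianFactorGraph.h>
    #include <gtsam/linear/JacobianFactor.h>
    #include <gtsam/linear/NoiseModel.h>
    using namespace gtsam;

    // Append a lambda-dependent prior to every variable of an already-linearized system.
    // 'dimensions' holds each variable's tangent-space dimension, indexed by its
    // position in the ordering, exactly as the removed code stores it.
    void dampSystem(GaussianFactorGraph& damped,
                    const std::vector<size_t>& dimensions, double lambda) {
      const double sigma = 1.0 / std::sqrt(lambda);
      for (Index j = 0; j < dimensions.size(); ++j) {
        const size_t dim = dimensions[j];
        Matrix A = eye(dim);
        Vector b = zero(dim);
        // A prior with sigma = 1/sqrt(lambda) adds lambda * I to variable j's Hessian block.
        SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
        damped.push_back(JacobianFactor::shared_ptr(new JacobianFactor(j, A, b, model)));
      }
    }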
- // Reminder: the parameters are Graph type $G$, Values class type $T$, - // linear system class $L$, the non linear solver type $S$, and the writer type $W$ - template - NonlinearOptimizer NonlinearOptimizer::try_lambda(const L& linearSystem) { - - const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ; - const double factor = parameters_->lambdaFactor_ ; - double lambda = parameters_->lambda_ ; - - if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED."); - - double next_error = error_; - shared_values next_values = values_; - - // Keep increasing lambda until we make make progress - while(true) { - if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl; - - // add prior-factors - // TODO: replace this dampening with a backsubstitution approach - typename L::shared_ptr dampedSystem(new L(linearSystem)); - { - double sigma = 1.0 / sqrt(lambda); - dampedSystem->reserve(dampedSystem->size() + dimensions_->size()); - // for each of the variables, add a prior - for(Index j=0; jsize(); ++j) { - size_t dim = (*dimensions_)[j]; - Matrix A = eye(dim); - Vector b = zero(dim); - SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma); - typename L::sharedFactor prior(new JacobianFactor(j, A, b, model)); - dampedSystem->push_back(prior); - } - } - if (verbosity >= Parameters::DAMPED) dampedSystem->print("damped"); - - // Create a new solver using the damped linear system - // FIXME: remove spcg specific code - if (spcg_solver_) spcg_solver_->replaceFactors(dampedSystem); - shared_solver solver = (spcg_solver_) ? spcg_solver_ : shared_solver( - new S(dampedSystem, structure_, parameters_->useQR_)); - - // Try solving - try { - VectorValues delta = *solver->optimize(); - if (verbosity >= Parameters::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl; - if (verbosity >= Parameters::TRYDELTA) delta.print("delta"); - - // update values - shared_values newValues(new Values(values_->retract(delta, *ordering_))); - - // create new optimization state with more adventurous lambda - double error = graph_->error(*newValues); - - if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl; - - if( error <= error_ ) { - next_values = newValues; - next_error = error; - lambda /= factor; - break; - } - else { - // Either we're not cautious, or the same lambda was worse than the current error. - // The more adventurous lambda was worse too, so make lambda more conservative - // and keep the same values. - if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { - if(verbosity >= Parameters::ERROR) - cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; - break; - } else { - lambda *= factor; - } - } - } catch(const NegativeMatrixException& e) { - if(verbosity >= Parameters::LAMBDA) - cout << "Negative matrix, increasing lambda" << endl; - // Either we're not cautious, or the same lambda was worse than the current error. - // The more adventurous lambda was worse too, so make lambda more conservative - // and keep the same values. - if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { - if(verbosity >= Parameters::ERROR) - cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; - break; - } else { - lambda *= factor; - } - } catch(...) 
{ - throw; - } - } // end while - - return newValuesErrorLambda_(next_values, next_error, lambda); - } - - /* ************************************************************************* */ - // One iteration of Levenberg Marquardt - // Reminder: the parameters are Graph type $G$, Values class type $T$, - // linear system class $L$, the non linear solver type $S$, and the writer type $W$ - template - NonlinearOptimizer NonlinearOptimizer::iterateLM() { - - const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - const double lambda = parameters_->lambda_ ; - - // show output - if (verbosity >= Parameters::VALUES) values_->print("values"); - if (verbosity >= Parameters::ERROR) cout << "error: " << error_ << endl; - if (verbosity >= Parameters::LAMBDA) cout << "lambda = " << lambda << endl; - - // linearize all factors once - boost::shared_ptr linear(new L(*graph_->linearize(*values_, *ordering_))); - - if (verbosity >= Parameters::LINEAR) linear->print("linear"); - - // try lambda steps with successively larger lambda until we achieve descent - if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl; - - return try_lambda(*linear); - } - - /* ************************************************************************* */ - // Reminder: the parameters are Graph type $G$, Values class type $T$, - // linear system class $L$, the non linear solver type $S$, and the writer type $W$ - template - NonlinearOptimizer NonlinearOptimizer::levenbergMarquardt() { - - // Initialize - bool converged = false; - const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - - // check if we're already close enough - if (error_ < parameters_->sumError_) { - if ( verbosity >= Parameters::ERROR ) - cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl; - return *this; - } - - // for the case that maxIterations_ = 0 - iterations_ = 1; - if (iterations_ >= parameters_->maxIterations_) - return *this; - - // Iterative loop that implements Levenberg-Marquardt - while (true) { - double previous_error = error_; - // do one iteration of LM - NonlinearOptimizer next = iterateLM(); - error_ = next.error_; - values_ = next.values_; - parameters_ = next.parameters_; - iterations_ = next.iterations_; - - // check convergence - // TODO: move convergence checks here and incorporate in verbosity levels - // TODO: build into iterations somehow as an instance variable - converged = gtsam::check_convergence(*parameters_, previous_error, error_); - - if(iterations_ >= parameters_->maxIterations_ || converged == true) { - if (verbosity >= Parameters::VALUES) values_->print("final values"); - if (verbosity >= Parameters::ERROR && iterations_ >= parameters_->maxIterations_) cout << "Terminating because reached maximum iterations" << endl; - if (verbosity >= Parameters::ERROR) cout << "final error: " << error_ << endl; - if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << lambda() << endl; - return *this; - } - iterations_++; - } - } - - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::iterateDogLeg() { - - S solver(*graph_->linearize(*values_, *ordering_), parameters_->useQR_); - DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate( - parameters_->lambda_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *solver.eliminate(), - *graph_, *values_, *ordering_, error_, parameters_->verbosity_ > Parameters::ERROR); - shared_values 
newValues(new Values(values_->retract(result.dx_d, *ordering_))); - return newValuesErrorLambda_(newValues, result.f_error, result.Delta); - } - - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::dogLeg() { - static W writer(error_); - - // check if we're already close enough - if (error_ < parameters_->sumError_) { - if ( parameters_->verbosity_ >= Parameters::ERROR ) - cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl; - return *this; - } - - // for the case that maxIterations_ = 0 - iterations_ = 1; - if (iterations_ >= parameters_->maxIterations_) - return *this; - - // Iterative loop that runs Dog Leg - while (true) { - double previous_error = error_; - // do one iteration of LM - NonlinearOptimizer next = iterateDogLeg(); - writer.write(next.error_); - error_ = next.error_; - values_ = next.values_; - parameters_ = next.parameters_; - - // check convergence - // TODO: move convergence checks here and incorporate in verbosity levels - // TODO: build into iterations somehow as an instance variable - bool converged = gtsam::check_convergence(*parameters_, previous_error, error_); - - if(iterations_ >= parameters_->maxIterations_ || converged == true) { - if (parameters_->verbosity_ >= Parameters::VALUES) values_->print("final values"); - if (parameters_->verbosity_ >= Parameters::ERROR) cout << "final error: " << error_ << endl; - if (parameters_->verbosity_ >= Parameters::LAMBDA) cout << "final Delta (called lambda) = " << lambda() << endl; - return *this; - } - iterations_++; - } - - } - -} From ff7a78b8549d954b7f1a70fa567a56e226f61937 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 1 Mar 2012 16:07:19 +0000 Subject: [PATCH 07/57] Slightly more convenient NonlinearOptimizer constructors --- gtsam/nonlinear/GaussNewtonOptimizer.h | 21 ++++++++++++++++++- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 21 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index 3b1e69481..e445a4e00 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -90,7 +90,7 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, - const GaussNewtonParams& params = GaussNewtonParams(), + const GaussNewtonParams& params, const Ordering& ordering = Ordering()) : NonlinearOptimizer( SharedGraph(new NonlinearFactorGraph(graph)), @@ -101,6 +101,25 @@ public: ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))) {} + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, + const Ordering& ordering = Ordering()) : + NonlinearOptimizer( + SharedGraph(new NonlinearFactorGraph(graph)), + SharedValues(new Values(values)), + SharedGNParams(new GaussNewtonParams())), + gnParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(ordering.size() == 0), + ordering_(colamdOrdering_ ? 
+ graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))) {} + /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. * @param graph The nonlinear factor graph to optimize diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index b2605b968..48a7fd532 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -124,6 +124,27 @@ public: dimensions_(new vector(values_->dims(*ordering_))), lambda_(lmParams_->lambdaInitial) {} + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& values, + const Ordering& ordering) : + NonlinearOptimizer( + SharedGraph(new NonlinearFactorGraph(graph)), + SharedValues(new Values(values)), + SharedLMParams(new LevenbergMarquardtParams())), + lmParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(ordering.size() == 0), + ordering_(colamdOrdering_ ? + graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), + dimensions_(new vector(values_->dims(*ordering_))), + lambda_(lmParams_->lambdaInitial) {} + /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. * @param graph The nonlinear factor graph to optimize From 3a1175323c4c28a1e63c7fa61908887ef27cbafa Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 1 Mar 2012 16:07:23 +0000 Subject: [PATCH 08/57] Fixing compilation errors --- gtsam/slam/tests/testGeneralSFMFactor.cpp | 91 +++++----- .../testGeneralSFMFactor_Cal3Bundler.cpp | 91 +++++----- gtsam/slam/tests/testPose3SLAM.cpp | 37 ++-- gtsam/slam/tests/testStereoFactor.cpp | 4 +- gtsam/slam/tests/testVSLAM.cpp | 58 +++---- tests/testBoundingConstraint.cpp | 6 +- tests/testInference.cpp | 1 + tests/testNonlinearEquality.cpp | 160 ++++++++---------- tests/testNonlinearOptimizer.cpp | 83 +-------- 9 files changed, 219 insertions(+), 312 deletions(-) diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index 5915fd4ff..402b6e17e 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -146,11 +146,11 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { vector X = genCameraDefaultCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -158,23 +158,22 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + 
values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); // Create an ordering of the variables - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * 1e-5 * nMeasurements); } @@ -183,11 +182,11 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { vector L = genPoint3(); vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -195,9 +194,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { @@ -205,19 +204,18 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } else { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); const double reproj_error = 1e-5; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } @@ -229,36 +227,35 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { // add measurement with noise const double noise = baseline*0.1; - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); //Point3 pt(L[i].x(), L[i].y(), L[i].z()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } for ( size_t i = 0 ; i < X.size() ; ++i ) - graph->addCameraConstraint(i, X[i]); + graph.addCameraConstraint(i, X[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = 
LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } @@ -269,17 +266,17 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) { const double rot_noise = 1e-5, @@ -287,7 +284,7 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { focal_noise = 1, skew_noise = 1e-5; if ( i == 0 ) { - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; } else { @@ -298,24 +295,23 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { skew_noise, // s trans_noise, trans_noise // ux, uy ) ; - values->insert(Symbol('x',i), X[i].retract(delta)) ; + values.insert(Symbol('x',i), X[i].retract(delta)) ; } } for ( size_t i = 0 ; i < L.size() ; ++i ) { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } // fix X0 and all landmarks, allow only the X[1] to move - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); for ( size_t i = 0 ; i < L.size() ; ++i ) - graph->addPoint3Constraint(i, L[i]); + graph.addPoint3Constraint(i, L[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } @@ -325,11 +321,11 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -337,24 +333,23 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } diff --git a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp 
b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index 7f327b2fd..ca698365a 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -148,11 +148,11 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { vector X = genCameraDefaultCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -160,23 +160,22 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); // Create an ordering of the variables - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * 1e-5 * nMeasurements); } @@ -185,11 +184,11 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { vector L = genPoint3(); vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -197,9 +196,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { @@ -207,19 +206,18 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } else { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); const double reproj_error = 1e-5; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } @@ -231,36 +229,35 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { // add measurement with noise const double noise = baseline*0.1; - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; 
++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); //Point3 pt(L[i].x(), L[i].y(), L[i].z()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } for ( size_t i = 0 ; i < X.size() ; ++i ) - graph->addCameraConstraint(i, X[i]); + graph.addCameraConstraint(i, X[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } @@ -271,23 +268,23 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) { const double rot_noise = 1e-5, trans_noise = 1e-3, focal_noise = 1, distort_noise = 1e-3; if ( i == 0 ) { - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; } else { @@ -296,24 +293,23 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { trans_noise, trans_noise, trans_noise, // translation focal_noise, distort_noise, distort_noise // f, k1, k2 ) ; - values->insert(Symbol('x',i), X[i].retract(delta)) ; + values.insert(Symbol('x',i), X[i].retract(delta)) ; } } for ( size_t i = 0 ; i < L.size() ; ++i ) { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } // fix X0 and all landmarks, allow only the X[1] to move - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); for ( size_t i = 0 ; i < L.size() ; ++i ) - graph->addPoint3Constraint(i, L[i]); + graph.addPoint3Constraint(i, L[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } @@ -323,11 +319,11 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -335,24 +331,23 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { // add initial 
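Where a test needs tighter tolerances than the defaults, the rewritten code configures the new LevenbergMarquardtParams struct instead of the old positional NonlinearOptimizationParameters constructor. A compact sketch of that pattern follows; relativeErrorTol and lambdaInitial are the two fields that actually appear in this patch series, while the wrapper function and include paths are illustrative.

    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/Values.h>
    using namespace gtsam;

    // Sketch of the parameterized form used when a test needs tighter convergence thresholds.
    Values runLM(const NonlinearFactorGraph& graph, const Values& initial,
                 const Ordering& ordering) {
      LevenbergMarquardtParams params;
      params.relativeErrorTol = 1e-15;  // run essentially to numerical convergence
      params.lambdaInitial = 1e-5;      // starting damping value

      // optimize() returns the converged optimizer state; read the estimate back from it.
      NonlinearOptimizer::auto_ptr result =
          LevenbergMarquardtOptimizer(graph, initial, params, ordering).optimize();
      return *result->values();
    }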
const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = - LevenbergMarquardtOptimizer(graph, values, LevenbergMarquardtParams(), ordering).optimize(); + Ordering ordering = *getOrdering(X,L); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } diff --git a/gtsam/slam/tests/testPose3SLAM.cpp b/gtsam/slam/tests/testPose3SLAM.cpp index 5759b1562..059e5b762 100644 --- a/gtsam/slam/tests/testPose3SLAM.cpp +++ b/gtsam/slam/tests/testPose3SLAM.cpp @@ -53,33 +53,32 @@ TEST(Pose3Graph, optimizeCircle) { Pose3 gT0 = hexagon.at(PoseKey(0)), gT1 = hexagon.at(PoseKey(1)); // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new Pose3Graph); - fg->addHardConstraint(0, gT0); + Pose3Graph fg; + fg.addHardConstraint(0, gT0); Pose3 _0T1 = gT0.between(gT1); // inv(gT0)*gT1 double theta = M_PI/3.0; CHECK(assert_equal(Pose3(Rot3::yaw(-theta),Point3(radius*sin(theta),-radius*cos(theta),0)),_0T1)); - fg->addConstraint(0,1, _0T1, covariance); - fg->addConstraint(1,2, _0T1, covariance); - fg->addConstraint(2,3, _0T1, covariance); - fg->addConstraint(3,4, _0T1, covariance); - fg->addConstraint(4,5, _0T1, covariance); - fg->addConstraint(5,0, _0T1, covariance); + fg.addConstraint(0,1, _0T1, covariance); + fg.addConstraint(1,2, _0T1, covariance); + fg.addConstraint(2,3, _0T1, covariance); + fg.addConstraint(3,4, _0T1, covariance); + fg.addConstraint(4,5, _0T1, covariance); + fg.addConstraint(5,0, _0T1, covariance); // Create initial config - boost::shared_ptr initial(new Values()); - initial->insert(PoseKey(0), gT0); - initial->insert(PoseKey(1), hexagon.at(PoseKey(1)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); - initial->insert(PoseKey(2), hexagon.at(PoseKey(2)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); - initial->insert(PoseKey(3), hexagon.at(PoseKey(3)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); - initial->insert(PoseKey(4), hexagon.at(PoseKey(4)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); - initial->insert(PoseKey(5), hexagon.at(PoseKey(5)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); + Values initial; + initial.insert(PoseKey(0), gT0); + initial.insert(PoseKey(1), hexagon.at(PoseKey(1)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); + initial.insert(PoseKey(2), hexagon.at(PoseKey(2)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); + initial.insert(PoseKey(3), hexagon.at(PoseKey(3)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); + initial.insert(PoseKey(4), hexagon.at(PoseKey(4)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); + initial.insert(PoseKey(5), hexagon.at(PoseKey(5)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); // Choose an ordering and optimize - shared_ptr ordering(new Ordering); - *ordering += kx0,kx1,kx2,kx3,kx4,kx5; + Ordering ordering; + ordering += 
kx0,kx1,kx2,kx3,kx4,kx5; - Values actual = *LevenbergMarquardtOptimizer( - fg, initial, LevenbergMarquardtOptimizer::SharedLMParams(), ordering).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(fg, initial, ordering).optimize()->values(); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); diff --git a/gtsam/slam/tests/testStereoFactor.cpp b/gtsam/slam/tests/testStereoFactor.cpp index d101708b3..c1393450e 100644 --- a/gtsam/slam/tests/testStereoFactor.cpp +++ b/gtsam/slam/tests/testStereoFactor.cpp @@ -59,13 +59,13 @@ TEST( StereoFactor, singlePoint) graph.add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K)); // Create a configuration corresponding to the ground truth - Values values(new Values()); + Values values; values.insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin Point3 l1(0, 0, 0); values.insert(PointKey(1), l1); // add point at origin; - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, values))); + NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, values)); // We expect the initial to be zero because config is the ground truth DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); diff --git a/gtsam/slam/tests/testVSLAM.cpp b/gtsam/slam/tests/testVSLAM.cpp index 3a80752c2..a5eba9715 100644 --- a/gtsam/slam/tests/testVSLAM.cpp +++ b/gtsam/slam/tests/testVSLAM.cpp @@ -81,28 +81,28 @@ visualSLAM::Graph testGraph() { TEST( Graph, optimizeLM) { // build a graph - shared_ptr graph(new visualSLAM::Graph(testGraph())); + visualSLAM::Graph graph(testGraph()); // add 3 landmark constraints - graph->addPointConstraint(1, landmark1); - graph->addPointConstraint(2, landmark2); - graph->addPointConstraint(3, landmark3); + graph.addPointConstraint(1, landmark1); + graph.addPointConstraint(2, landmark2); + graph.addPointConstraint(3, landmark3); // Create an initial values structure corresponding to the ground truth - boost::shared_ptr initialEstimate(new Values); - initialEstimate->insert(PoseKey(1), camera1); - initialEstimate->insert(PoseKey(2), camera2); - initialEstimate->insert(PointKey(1), landmark1); - initialEstimate->insert(PointKey(2), landmark2); - initialEstimate->insert(PointKey(3), landmark3); - initialEstimate->insert(PointKey(4), landmark4); + Values initialEstimate; + initialEstimate.insert(PoseKey(1), camera1); + initialEstimate.insert(PoseKey(2), camera2); + initialEstimate.insert(PointKey(1), landmark1); + initialEstimate.insert(PointKey(2), landmark2); + initialEstimate.insert(PointKey(3), landmark3); + initialEstimate.insert(PointKey(4), landmark4); // Create an ordering of the variables - shared_ptr ordering(new Ordering); - *ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); + Ordering ordering; + ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, GaussNewtonParams(), ordering)); + NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, ordering)); DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started @@ -111,7 +111,7 @@ TEST( Graph, optimizeLM) DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration->values()))); + 
CHECK(assert_equal(initialEstimate,*(afterOneIteration->values()))); } @@ -119,27 +119,27 @@ TEST( Graph, optimizeLM) TEST( Graph, optimizeLM2) { // build a graph - shared_ptr graph(new visualSLAM::Graph(testGraph())); + visualSLAM::Graph graph(testGraph()); // add 2 camera constraints - graph->addPoseConstraint(1, camera1); - graph->addPoseConstraint(2, camera2); + graph.addPoseConstraint(1, camera1); + graph.addPoseConstraint(2, camera2); // Create an initial values structure corresponding to the ground truth - boost::shared_ptr initialEstimate(new Values); - initialEstimate->insert(PoseKey(1), camera1); - initialEstimate->insert(PoseKey(2), camera2); - initialEstimate->insert(PointKey(1), landmark1); - initialEstimate->insert(PointKey(2), landmark2); - initialEstimate->insert(PointKey(3), landmark3); - initialEstimate->insert(PointKey(4), landmark4); + Values initialEstimate; + initialEstimate.insert(PoseKey(1), camera1); + initialEstimate.insert(PoseKey(2), camera2); + initialEstimate.insert(PointKey(1), landmark1); + initialEstimate.insert(PointKey(2), landmark2); + initialEstimate.insert(PointKey(3), landmark3); + initialEstimate.insert(PointKey(4), landmark4); // Create an ordering of the variables - shared_ptr ordering(new Ordering); - *ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); + Ordering ordering; + ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, GaussNewtonParams(), ordering)); + NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, ordering)); DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started @@ -148,7 +148,7 @@ TEST( Graph, optimizeLM2) DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration->values()))); + CHECK(assert_equal(initialEstimate,*(afterOneIteration->values()))); } diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index e553fb72b..32495f4e7 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include namespace iq2D = simulated2D::inequality_constraints; using namespace std; @@ -228,7 +228,7 @@ TEST( testBoundingConstraint, MaxDistance_simple_optimization) { Point2 pt1, pt2_init(5.0, 0.0), pt2_goal(2.0, 0.0); Symbol x1('x',1), x2('x',2); - Graph graph; + NonlinearFactorGraph graph; graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(pt1, x1)); graph.add(simulated2D::Prior(pt2_init, soft_model2_alt, x2)); graph.add(iq2D::PoseMaxDistConstraint(x1, x2, 2.0)); @@ -254,7 +254,7 @@ TEST( testBoundingConstraint, avoid_demo) { Point2 x1_pt, x2_init(2.0, 0.5), x2_goal(2.0, 1.0), x3_pt(4.0, 0.0), l1_pt(2.0, 0.0); Point2 odo(2.0, 0.0); - Graph graph; + NonlinearFactorGraph graph; graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(x1_pt, x1)); graph.add(simulated2D::Odometry(odo, soft_model2_alt, x1, x2)); graph.add(iq2D::LandmarkAvoid(x2, l1, radius)); diff --git a/tests/testInference.cpp b/tests/testInference.cpp index df8843415..0bc6c3782 100644 --- a/tests/testInference.cpp +++ b/tests/testInference.cpp @@ -18,6 +18,7 @@ #include #include +#include #include #include diff --git 
a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index 2ceb066e2..8652a3927 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include using namespace std; using namespace gtsam; @@ -35,9 +35,6 @@ typedef PriorFactor PosePrior; typedef NonlinearEquality PoseNLE; typedef boost::shared_ptr shared_poseNLE; -typedef NonlinearFactorGraph PoseGraph; -typedef NonlinearOptimizer PoseOptimizer; - Symbol key('x',1); /* ************************************************************************* */ @@ -188,25 +185,23 @@ TEST ( NonlinearEquality, allow_error_optimize ) { PoseNLE nle(key1, feasible1, error_gain); // add to a graph - boost::shared_ptr graph(new PoseGraph()); - graph->add(nle); + NonlinearFactorGraph graph; + graph.add(nle); // initialize away from the ideal Pose2 initPose(0.0, 2.0, 3.0); - boost::shared_ptr init(new Values()); - init->insert(key1, initPose); + Values init; + init.insert(key1, initPose); // optimize - boost::shared_ptr ord(new Ordering()); - ord->push_back(key1); - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-5, 1e-5); - PoseOptimizer optimizer(graph, init, ord, params); - PoseOptimizer result = optimizer.levenbergMarquardt(); + Ordering ordering; + ord.push_back(key1); + NonlinearOptimizer::auto_ptr result = LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); // verify Values expected; expected.insert(key1, feasible1); - EXPECT(assert_equal(expected, *result.values())); + EXPECT(assert_equal(expected, *result->values())); } /* ************************************************************************* */ @@ -217,9 +212,9 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { Pose2 feasible1(1.0, 2.0, 3.0); // initialize away from the ideal - boost::shared_ptr init(new Values()); + Values init; Pose2 initPose(0.0, 2.0, 3.0); - init->insert(key1, initPose); + init.insert(key1, initPose); double error_gain = 500.0; PoseNLE nle(key1, feasible1, error_gain); @@ -228,32 +223,25 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { PosePrior prior(key1, initPose, noiseModel::Isotropic::Sigma(3, 0.1)); // add to a graph - boost::shared_ptr graph(new PoseGraph()); - graph->add(nle); - graph->add(prior); + NonlinearFactorGraph graph; + graph.add(nle); + graph.add(prior); // optimize - boost::shared_ptr ord(new Ordering()); - ord->push_back(key1); - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-5, 1e-5); - PoseOptimizer optimizer(graph, init, ord, params); - PoseOptimizer result = optimizer.levenbergMarquardt(); + Ordering ordering; + ord.push_back(key1); + Values actual = *LevenbergMarquardtOptimizer(graph, values, ordering).optimize()->values(); // verify Values expected; expected.insert(key1, feasible1); - EXPECT(assert_equal(expected, *result.values())); + EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ SharedDiagonal hard_model = noiseModel::Constrained::All(2); SharedDiagonal soft_model = noiseModel::Isotropic::Sigma(2, 1.0); -typedef NonlinearFactorGraph Graph; -typedef boost::shared_ptr shared_graph; -typedef boost::shared_ptr shared_values; -typedef NonlinearOptimizer Optimizer; - /* ************************************************************************* */ TEST( testNonlinearEqualityConstraint, unary_basics ) { Point2 pt(1.0, 2.0); 
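Note that, as committed, the two allow_error_optimize hunks above declare Ordering ordering but then call ord.push_back(key1), and the second hunk hands the optimizer a values object the test never declares (its initial estimate is named init). A consistent version of the intended calls is sketched below for orientation only, using the names already present in those tests; the actual correction presumably lands later in this patch series.

    // Intended body after the migration, with the ordering/init names used consistently.
    Ordering ordering;
    ordering.push_back(key1);
    NonlinearOptimizer::auto_ptr result =
        LevenbergMarquardtOptimizer(graph, init, ordering).optimize();

    // verify against the feasible value, as before
    Values expected;
    expected.insert(key1, feasible1);
    EXPECT(assert_equal(expected, *result->values()));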
@@ -314,23 +302,23 @@ TEST( testNonlinearEqualityConstraint, unary_simple_optimization ) { simulated2D::Prior::shared_ptr factor( new simulated2D::Prior(badPt, soft_model, key)); - shared_graph graph(new Graph()); - graph->push_back(constraint); - graph->push_back(factor); + NonlinearFactorGraph graph; + graph.push_back(constraint); + graph.push_back(factor); - shared_values initValues(new Values()); - initValues->insert(key, badPt); + Values initValues; + initValues.insert(key, badPt); // verify error values - EXPECT(constraint->active(*initValues)); + EXPECT(constraint->active(initValues)); Values expected; expected.insert(key, truth_pt); EXPECT(constraint->active(expected)); EXPECT_DOUBLES_EQUAL(0.0, constraint->error(expected), tol); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); - EXPECT(assert_equal(expected, *actual, tol)); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); + EXPECT(assert_equal(expected, actual, tol)); } /* ************************************************************************* */ @@ -411,20 +399,20 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) { new eq2D::OdoEqualityConstraint( truth_pt1.between(truth_pt2), key1, key2)); - shared_graph graph(new Graph()); - graph->push_back(constraint1); - graph->push_back(constraint2); - graph->push_back(factor); + Graph graph; + graph.push_back(constraint1); + graph.push_back(constraint2); + graph.push_back(factor); - shared_values initValues(new Values()); - initValues->insert(key1, Point2()); - initValues->insert(key2, badPt); + Values initValues; + initValues.insert(key1, Point2()); + initValues.insert(key2, badPt); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); Values expected; expected.insert(key1, truth_pt1); expected.insert(key2, truth_pt2); - CHECK(assert_equal(expected, *actual, tol)); + CHECK(assert_equal(expected, actual, tol)); } /* ********************************************************************* */ @@ -435,44 +423,44 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { * constrained to a particular value */ - shared_graph graph(new Graph()); + Graph graph; Symbol x1('x',1), x2('x',2); Symbol l1('l',1), l2('l',2); Point2 pt_x1(1.0, 1.0), pt_x2(5.0, 6.0); - graph->add(eq2D::UnaryEqualityConstraint(pt_x1, x1)); - graph->add(eq2D::UnaryEqualityConstraint(pt_x2, x2)); + graph.add(eq2D::UnaryEqualityConstraint(pt_x1, x1)); + graph.add(eq2D::UnaryEqualityConstraint(pt_x2, x2)); Point2 z1(0.0, 5.0); SharedNoiseModel sigma(noiseModel::Isotropic::Sigma(2, 0.1)); - graph->add(simulated2D::Measurement(z1, sigma, x1,l1)); + graph.add(simulated2D::Measurement(z1, sigma, x1,l1)); Point2 z2(-4.0, 0.0); - graph->add(simulated2D::Measurement(z2, sigma, x2,l2)); + graph.add(simulated2D::Measurement(z2, sigma, x2,l2)); - graph->add(eq2D::PointEqualityConstraint(l1, l2)); + graph.add(eq2D::PointEqualityConstraint(l1, l2)); - shared_values initialEstimate(new Values()); - initialEstimate->insert(x1, pt_x1); - initialEstimate->insert(x2, Point2()); - initialEstimate->insert(l1, Point2(1.0, 6.0)); // ground truth - initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame + Values initialEstimate; + initialEstimate.insert(x1, pt_x1); + initialEstimate.insert(x2, Point2()); + initialEstimate.insert(l1, Point2(1.0, 6.0)); // ground truth + initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting 
with a separate reference frame - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate); + Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimize()->values(); Values expected; expected.insert(x1, pt_x1); expected.insert(l1, Point2(1.0, 6.0)); expected.insert(l2, Point2(1.0, 6.0)); expected.insert(x2, Point2(5.0, 6.0)); - CHECK(assert_equal(expected, *actual, 1e-5)); + CHECK(assert_equal(expected, actual, 1e-5)); } /* ********************************************************************* */ TEST (testNonlinearEqualityConstraint, map_warp ) { // get a graph - shared_graph graph(new Graph()); + Graph graph; // keys Symbol x1('x',1), x2('x',2); @@ -480,37 +468,37 @@ TEST (testNonlinearEqualityConstraint, map_warp ) { // constant constraint on x1 Point2 pose1(1.0, 1.0); - graph->add(eq2D::UnaryEqualityConstraint(pose1, x1)); + graph.add(eq2D::UnaryEqualityConstraint(pose1, x1)); SharedDiagonal sigma = noiseModel::Isotropic::Sigma(1,0.1); // measurement from x1 to l1 Point2 z1(0.0, 5.0); - graph->add(simulated2D::Measurement(z1, sigma, x1, l1)); + graph.add(simulated2D::Measurement(z1, sigma, x1, l1)); // measurement from x2 to l2 Point2 z2(-4.0, 0.0); - graph->add(simulated2D::Measurement(z2, sigma, x2, l2)); + graph.add(simulated2D::Measurement(z2, sigma, x2, l2)); // equality constraint between l1 and l2 - graph->add(eq2D::PointEqualityConstraint(l1, l2)); + graph.add(eq2D::PointEqualityConstraint(l1, l2)); // create an initial estimate - shared_values initialEstimate(new Values()); - initialEstimate->insert(x1, Point2( 1.0, 1.0)); - initialEstimate->insert(l1, Point2( 1.0, 6.0)); - initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame - initialEstimate->insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin + Values initialEstimate; + initialEstimate.insert(x1, Point2( 1.0, 1.0)); + initialEstimate.insert(l1, Point2( 1.0, 6.0)); + initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame + initialEstimate.insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin // optimize - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate); + Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimize()->values(); Values expected; expected.insert(x1, Point2(1.0, 1.0)); expected.insert(l1, Point2(1.0, 6.0)); expected.insert(l2, Point2(1.0, 6.0)); expected.insert(x2, Point2(5.0, 6.0)); - CHECK(assert_equal(expected, *actual, tol)); + CHECK(assert_equal(expected, actual, tol)); } // make a realistic calibration matrix @@ -520,9 +508,7 @@ Cal3_S2 K(fov,w,h); boost::shared_ptr shK(new Cal3_S2(K)); // typedefs for visual SLAM example -typedef boost::shared_ptr shared_vconfig; typedef visualSLAM::Graph VGraph; -typedef NonlinearOptimizer VOptimizer; // factors for visual slam typedef NonlinearEquality2 Point3Equality; @@ -546,32 +532,32 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { Symbol l1('l',1), l2('l',2); // create graph - VGraph::shared_graph graph(new VGraph()); + VGraph graph; // create equality constraints for poses - graph->addPoseConstraint(1, camera1.pose()); - graph->addPoseConstraint(2, camera2.pose()); + graph.addPoseConstraint(1, camera1.pose()); + graph.addPoseConstraint(2, camera2.pose()); // create factors SharedDiagonal vmodel = noiseModel::Unit::Create(3); - graph->addMeasurement(camera1.project(landmark), vmodel, 1, 1, shK); - graph->addMeasurement(camera2.project(landmark), vmodel, 2, 2, shK); + 
graph.addMeasurement(camera1.project(landmark), vmodel, 1, 1, shK); + graph.addMeasurement(camera2.project(landmark), vmodel, 2, 2, shK); // add equality constraint - graph->add(Point3Equality(l1, l2)); + graph.add(Point3Equality(l1, l2)); // create initial data Point3 landmark1(0.5, 5.0, 0.0); Point3 landmark2(1.5, 5.0, 0.0); - shared_vconfig initValues(new Values()); - initValues->insert(x1, pose1); - initValues->insert(x2, pose2); - initValues->insert(l1, landmark1); - initValues->insert(l2, landmark2); + Values initValues; + initValues.insert(x1, pose1); + initValues.insert(x2, pose2); + initValues.insert(l1, landmark1); + initValues.insert(l2, landmark2); // optimize - VOptimizer::shared_values actual = VOptimizer::optimizeLM(graph, initValues); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); // create config Values truthValues; @@ -581,7 +567,7 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { truthValues.insert(l2, landmark); // check if correct - CHECK(assert_equal(truthValues, *actual, 1e-5)); + CHECK(assert_equal(truthValues, actual, 1e-5)); } /* ************************************************************************* */ diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 91d92b530..f2ccac366 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -43,66 +43,6 @@ const double tol = 1e-5; Key kx(size_t i) { return Symbol('x',i); } Key kl(size_t i) { return Symbol('l',i); } -typedef NonlinearOptimizer Optimizer; - -/* ************************************************************************* */ -TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) -{ - shared_ptr fg(new example::Graph( - example::createNonlinearFactorGraph())); - Optimizer::shared_values initial = example::sharedNoisyValues(); - - // Expected values structure is the difference between the noisy config - // and the ground-truth config. One step only because it's linear ! - Ordering ord1; ord1 += kx(2),kl(1),kx(1); - VectorValues expected(initial->dims(ord1)); - Vector dl1(2); - dl1(0) = -0.1; - dl1(1) = 0.1; - expected[ord1[kl(1)]] = dl1; - Vector dx1(2); - dx1(0) = -0.1; - dx1(1) = -0.1; - expected[ord1[kx(1)]] = dx1; - Vector dx2(2); - dx2(0) = 0.1; - dx2(1) = -0.2; - expected[ord1[kx(2)]] = dx2; - - // Check one ordering - Optimizer optimizer1(fg, initial, Optimizer::shared_ordering(new Ordering(ord1))); - - VectorValues actual1 = optimizer1.linearizeAndOptimizeForDelta(); - CHECK(assert_equal(actual1,expected)); - -// SL-FIX // Check another -// shared_ptr ord2(new Ordering()); -// *ord2 += kx(1),kx(2),kl(1); -// solver = Optimizer::shared_solver(new Optimizer::solver(ord2)); -// Optimizer optimizer2(fg, initial, solver); -// -// VectorValues actual2 = optimizer2.linearizeAndOptimizeForDelta(); -// CHECK(assert_equal(actual2,expected)); -// -// // And yet another... -// shared_ptr ord3(new Ordering()); -// *ord3 += kl(1),kx(1),kx(2); -// solver = Optimizer::shared_solver(new Optimizer::solver(ord3)); -// Optimizer optimizer3(fg, initial, solver); -// -// VectorValues actual3 = optimizer3.linearizeAndOptimizeForDelta(); -// CHECK(assert_equal(actual3,expected)); -// -// // More... 
-// shared_ptr ord4(new Ordering()); -// *ord4 += kx(1),kx(2), kl(1); -// solver = Optimizer::shared_solver(new Optimizer::solver(ord4)); -// Optimizer optimizer4(fg, initial, solver); -// -// VectorValues actual4 = optimizer4.linearizeAndOptimizeForDelta(); -// CHECK(assert_equal(actual4,expected)); -} - /* ************************************************************************* */ TEST( NonlinearOptimizer, iterateLM ) { @@ -120,22 +60,13 @@ TEST( NonlinearOptimizer, iterateLM ) ord->push_back(kx(1)); // create initial optimization state, with lambda=0 - Optimizer optimizer(fg, config, ord, NonlinearOptimizationParameters::newLambda(0.)); + NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, config, LevenbergMarquardtParams(), ord).update(0.0); // normal iterate - Optimizer iterated1 = optimizer.iterate(); + NonlinearOptimizer::auto_ptr iterated1 = GaussNewtonOptimizer(fg, config, GaussNewtonParams(), ord).iterate(); // LM iterate with lambda 0 should be the same - Optimizer iterated2 = optimizer.iterateLM(); - - // Try successive iterates. TODO: ugly pointers, better way ? - Optimizer *pointer = new Optimizer(iterated2); - for (int i=0;i<10;i++) { - Optimizer* newOptimizer = new Optimizer(pointer->iterateLM()); - delete pointer; - pointer = newOptimizer; - } - delete(pointer); + NonlinearOptimizer::auto_ptr iterated2 = LevenbergMarquardtOptimizer(fg, config, LevenbergMarquardtParams(), ord).update(0.0)->iterate(); CHECK(assert_equal(*iterated1.values(), *iterated2.values(), 1e-9)); } @@ -169,12 +100,12 @@ TEST( NonlinearOptimizer, optimize ) Optimizer optimizer(fg, c0, ord, params); // Gauss-Newton - Optimizer actual1 = optimizer.gaussNewton(); - DOUBLES_EQUAL(0,fg->error(*(actual1.values())),tol); + NonlinearOptimizer::auto_ptr actual1 = *GaussNewtonOptimizer(fg, c0, GaussNewtonParams(), ord).optimize(); + DOUBLES_EQUAL(0,fg->error(*(actual1->values())),tol); // Levenberg-Marquardt - Optimizer actual2 = optimizer.levenbergMarquardt(); - DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol); + Optimizer actual2 = *LevenbergMarquardtOptimizer(fg, c0, LevenbergMarquardtParams(), ord).optimizer(); + DOUBLES_EQUAL(0,fg->error(*(actual2->values())),tol); } /* ************************************************************************* */ From 13ce0e59f0e2fbbd7fb244d192f19a0799a0b101 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 22 Mar 2012 17:46:39 +0000 Subject: [PATCH 09/57] Added typedef for const shared_ptr --- gtsam/nonlinear/Values.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index bdd1739da..76bcfdfb0 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -84,6 +84,9 @@ namespace gtsam { /// A shared_ptr to this class typedef boost::shared_ptr shared_ptr; + /// A const shared_ptr to this class + typedef boost::shared_ptr const_shared_ptr; + /// A key-value pair, which you get by dereferencing iterators struct KeyValuePair { const Key key; ///< The key From be386ed6bd93089d004e8bfbfa0125e796d798a8 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 22 Mar 2012 17:46:43 +0000 Subject: [PATCH 10/57] All unit tests compile --- .cproject | 12 ++++- .project | 2 +- tests/testNonlinearEquality.cpp | 12 ++--- tests/testNonlinearOptimizer.cpp | 90 +++++++++++++------------------- tests/testRot3Optimization.cpp | 5 +- 5 files changed, 56 insertions(+), 65 deletions(-) diff --git a/.cproject b/.cproject index cfdc18ef9..de3ea1faf 100644 --- a/.cproject +++ b/.cproject @@ -21,7 +21,7 
@@ - + @@ -285,6 +285,7 @@ + @@ -2044,6 +2045,14 @@ true true + + make + -j7 + testGeneralSFMFactor.run + true + true + true + make -j2 @@ -2221,5 +2230,4 @@ - diff --git a/.project b/.project index 9856df2ea..f59d79cb7 100644 --- a/.project +++ b/.project @@ -23,7 +23,7 @@ org.eclipse.cdt.make.core.buildArguments - -j5 + -j7 org.eclipse.cdt.make.core.buildCommand diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index 8652a3927..39051b8c4 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -195,7 +195,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) { // optimize Ordering ordering; - ord.push_back(key1); + ordering.push_back(key1); NonlinearOptimizer::auto_ptr result = LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); // verify @@ -229,8 +229,8 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { // optimize Ordering ordering; - ord.push_back(key1); - Values actual = *LevenbergMarquardtOptimizer(graph, values, ordering).optimize()->values(); + ordering.push_back(key1); + Values actual = *LevenbergMarquardtOptimizer(graph, init, ordering).optimize()->values(); // verify Values expected; @@ -399,7 +399,7 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) { new eq2D::OdoEqualityConstraint( truth_pt1.between(truth_pt2), key1, key2)); - Graph graph; + NonlinearFactorGraph graph; graph.push_back(constraint1); graph.push_back(constraint2); graph.push_back(factor); @@ -423,7 +423,7 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { * constrained to a particular value */ - Graph graph; + NonlinearFactorGraph graph; Symbol x1('x',1), x2('x',2); Symbol l1('l',1), l2('l',2); @@ -460,7 +460,7 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { /* ********************************************************************* */ TEST (testNonlinearEqualityConstraint, map_warp ) { // get a graph - Graph graph; + NonlinearFactorGraph graph; // keys Symbol x1('x',1), x2('x',2); diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index f2ccac366..ddf0c4ab5 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -34,7 +34,7 @@ using namespace boost; // template definitions #include -#include +#include using namespace gtsam; @@ -68,7 +68,7 @@ TEST( NonlinearOptimizer, iterateLM ) // LM iterate with lambda 0 should be the same NonlinearOptimizer::auto_ptr iterated2 = LevenbergMarquardtOptimizer(fg, config, LevenbergMarquardtParams(), ord).update(0.0)->iterate(); - CHECK(assert_equal(*iterated1.values(), *iterated2.values(), 1e-9)); + CHECK(assert_equal(*iterated1->values(), *iterated2->values(), 1e-9)); } /* ************************************************************************* */ @@ -93,18 +93,12 @@ TEST( NonlinearOptimizer, optimize ) shared_ptr ord(new Ordering()); ord->push_back(kx(1)); - // initial optimization state is the same in both cases tested - boost::shared_ptr params = boost::make_shared(); - params->relDecrease_ = 1e-5; - params->absDecrease_ = 1e-5; - Optimizer optimizer(fg, c0, ord, params); - // Gauss-Newton - NonlinearOptimizer::auto_ptr actual1 = *GaussNewtonOptimizer(fg, c0, GaussNewtonParams(), ord).optimize(); + NonlinearOptimizer::auto_ptr actual1 = GaussNewtonOptimizer(fg, c0, GaussNewtonParams(), ord).optimize(); DOUBLES_EQUAL(0,fg->error(*(actual1->values())),tol); // Levenberg-Marquardt - Optimizer actual2 = *LevenbergMarquardtOptimizer(fg, c0, LevenbergMarquardtParams(), ord).optimizer(); + 
NonlinearOptimizer::auto_ptr actual2 = LevenbergMarquardtOptimizer(fg, c0, LevenbergMarquardtParams(), ord).optimize(); DOUBLES_EQUAL(0,fg->error(*(actual2->values())),tol); } @@ -118,7 +112,7 @@ TEST( NonlinearOptimizer, SimpleLMOptimizer ) boost::shared_ptr c0(new Values); c0->insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0); + Values::const_shared_ptr actual = LevenbergMarquardtOptimizer(fg, c0).optimize()->values(); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -131,7 +125,7 @@ TEST( NonlinearOptimizer, SimpleLMOptimizer_noshared ) Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0); + Values::const_shared_ptr actual = LevenbergMarquardtOptimizer(fg, c0).optimize()->values(); DOUBLES_EQUAL(0,fg.error(*actual),tol); } @@ -145,7 +139,7 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer ) boost::shared_ptr c0(new Values); c0->insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0); + Values::const_shared_ptr actual = GaussNewtonOptimizer(fg, c0).optimize()->values(); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -158,15 +152,17 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared ) Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0); + Values::const_shared_ptr actual = GaussNewtonOptimizer(fg, c0).optimize()->values(); DOUBLES_EQUAL(0,fg.error(*actual),tol); } /* ************************************************************************* */ TEST( NonlinearOptimizer, optimization_method ) { - GaussNewtonParams params; - params.elimination = GaussNewtonParams::QR; + LevenbergMarquardtParams paramsQR; + paramsQR.factorization = LevenbergMarquardtParams::QR; + LevenbergMarquardtParams paramsLDL; + paramsLDL.factorization = LevenbergMarquardtParams::LDL; EXPECT(false); example::Graph fg = example::createReallyNonlinearFactorGraph(); @@ -175,79 +171,67 @@ TEST( NonlinearOptimizer, optimization_method ) Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Values actualMFQR = optimize( - fg, c0, *NonlinearOptimizationParameters().newFactorization(true), MULTIFRONTAL, LM); + Values actualMFQR = *LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize()->values(); DOUBLES_EQUAL(0,fg.error(actualMFQR),tol); - Values actualMFLDL = optimize( - fg, c0, *NonlinearOptimizationParameters().newFactorization(false), MULTIFRONTAL, LM); + Values actualMFLDL = *LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize()->values(); DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol); } /* ************************************************************************* */ TEST( NonlinearOptimizer, Factorization ) { - typedef NonlinearOptimizer Optimizer; + Values config; + config.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.)); + config.insert(pose2SLAM::PoseKey(2), Pose2(1.5,0.,0.)); - boost::shared_ptr config(new Values); - config->insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.)); - config->insert(pose2SLAM::PoseKey(2), Pose2(1.5,0.,0.)); + pose2SLAM::Graph graph; + graph.addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10)); + graph.addOdometry(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); - boost::shared_ptr graph(new pose2SLAM::Graph); - graph->addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10)); - graph->addOdometry(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); + Ordering ordering; + ordering.push_back(pose2SLAM::PoseKey(1)); + 
ordering.push_back(pose2SLAM::PoseKey(2)); - boost::shared_ptr ordering(new Ordering); - ordering->push_back(pose2SLAM::PoseKey(1)); - ordering->push_back(pose2SLAM::PoseKey(2)); - - Optimizer optimizer(graph, config, ordering); - Optimizer optimized = optimizer.iterateLM(); + NonlinearOptimizer::auto_ptr optimized = LevenbergMarquardtOptimizer(graph, config, ordering).iterate(); Values expected; expected.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.)); expected.insert(pose2SLAM::PoseKey(2), Pose2(1.,0.,0.)); - CHECK(assert_equal(expected, *optimized.values(), 1e-5)); + CHECK(assert_equal(expected, *optimized->values(), 1e-5)); } /* ************************************************************************* */ TEST_UNSAFE(NonlinearOptimizer, NullFactor) { - shared_ptr fg(new example::Graph( - example::createReallyNonlinearFactorGraph())); + example::Graph fg = example::createReallyNonlinearFactorGraph(); // Add null factor - fg->push_back(example::Graph::sharedFactor()); + fg.push_back(example::Graph::sharedFactor()); // test error at minimum Point2 xstar(0,0); Values cstar; cstar.insert(simulated2D::PoseKey(1), xstar); - DOUBLES_EQUAL(0.0,fg->error(cstar),0.0); + DOUBLES_EQUAL(0.0,fg.error(cstar),0.0); // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = Point2 x0(3,3); - boost::shared_ptr c0(new Values); - c0->insert(simulated2D::PoseKey(1), x0); - DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3); + Values c0; + c0.insert(simulated2D::PoseKey(1), x0); + DOUBLES_EQUAL(199.0,fg.error(c0),1e-3); // optimize parameters - shared_ptr ord(new Ordering()); - ord->push_back(kx(1)); - - // initial optimization state is the same in both cases tested - boost::shared_ptr params = boost::make_shared(); - params->relDecrease_ = 1e-5; - params->absDecrease_ = 1e-5; - Optimizer optimizer(fg, c0, ord, params); + Ordering ord; + ord.push_back(kx(1)); // Gauss-Newton - Optimizer actual1 = optimizer.gaussNewton(); - DOUBLES_EQUAL(0,fg->error(*(actual1.values())),tol); + NonlinearOptimizer::auto_ptr actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize(); + DOUBLES_EQUAL(0,fg.error(*actual1->values()),tol); // Levenberg-Marquardt - Optimizer actual2 = optimizer.levenbergMarquardt(); - DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol); + NonlinearOptimizer::auto_ptr actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize(); + DOUBLES_EQUAL(0,fg.error(*actual2->values()),tol); } ///* ************************************************************************* */ diff --git a/tests/testRot3Optimization.cpp b/tests/testRot3Optimization.cpp index 105bc8331..ec7f1ddfa 100644 --- a/tests/testRot3Optimization.cpp +++ b/tests/testRot3Optimization.cpp @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -47,8 +47,7 @@ TEST(Rot3, optimize) { fg.add(Between(Symbol('r',j), Symbol('r',(j+1)%6), Rot3::Rz(M_PI/3.0), sharedSigma(3, 0.01))); } - NonlinearOptimizationParameters params; - Values final = optimize(fg, initial, params); + Values final = *GaussNewtonOptimizer(fg, initial).optimize()->values(); EXPECT(assert_equal(truth, final, 1e-5)); } From b8ad7b2a0c68f6f6d70a2b0ba350e8ae5ed3233c Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 22 Mar 2012 18:02:25 +0000 Subject: [PATCH 11/57] Fixed bugs, all unit tests pass --- .cproject | 8 +++ gtsam/nonlinear/GaussNewtonOptimizer.h | 2 +- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 2 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 2 +- tests/testNonlinearOptimizer.cpp | 63 +------------------ 5 files changed, 12 insertions(+), 65 
deletions(-) diff --git a/.cproject b/.cproject index de3ea1faf..66d59d91a 100644 --- a/.cproject +++ b/.cproject @@ -2053,6 +2053,14 @@ true true + + make + -j7 + testNonlinearOptimizer.run + true + true + true + make -j2 diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index e445a4e00..0c85e35d1 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -210,7 +210,7 @@ private: // error, and increments the iteration count. GaussNewtonOptimizer(const GaussNewtonOptimizer& original, const SharedValues& newValues, double newError) : - NonlinearOptimizer(graph_, newValues, params_, newError, iterations_+1), + NonlinearOptimizer(original.graph_, newValues, original.params_, newError, original.iterations_+1), gnParams_(original.gnParams_), colamdOrdering_(original.colamdOrdering_), ordering_(original.ordering_) {} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index dd84bff1c..177c72c36 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -46,7 +46,7 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { const double lambdaFactor = lmParams_->lambdaFactor; // Variables to update during try_lambda loop - double lambda = lmParams_->lambdaInitial; + double lambda = lambda_; double next_error = error(); SharedValues next_values = values(); diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 48a7fd532..e3a8331be 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -263,7 +263,7 @@ private: // error, and increments the iteration count. 
LevenbergMarquardtOptimizer(const LevenbergMarquardtOptimizer& original, const SharedValues& newValues, double newError, double newLambda) : - NonlinearOptimizer(graph_, newValues, params_, newError, iterations_+1), + NonlinearOptimizer(original.graph_, newValues, original.params_, newError, original.iterations_+1), lmParams_(original.lmParams_), colamdOrdering_(original.colamdOrdering_), ordering_(original.ordering_), diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index ddf0c4ab5..5f228f3e7 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -163,7 +163,6 @@ TEST( NonlinearOptimizer, optimization_method ) paramsQR.factorization = LevenbergMarquardtParams::QR; LevenbergMarquardtParams paramsLDL; paramsLDL.factorization = LevenbergMarquardtParams::LDL; - EXPECT(false); example::Graph fg = example::createReallyNonlinearFactorGraph(); @@ -202,7 +201,7 @@ TEST( NonlinearOptimizer, Factorization ) } /* ************************************************************************* */ -TEST_UNSAFE(NonlinearOptimizer, NullFactor) { +TEST(NonlinearOptimizer, NullFactor) { example::Graph fg = example::createReallyNonlinearFactorGraph(); @@ -234,66 +233,6 @@ TEST_UNSAFE(NonlinearOptimizer, NullFactor) { DOUBLES_EQUAL(0,fg.error(*actual2->values()),tol); } -///* ************************************************************************* */ -// SL-FIX TEST( NonlinearOptimizer, SubgraphSolver ) -//{ -// using namespace pose2SLAM; -// typedef SubgraphSolver Solver; -// typedef NonlinearOptimizer Optimizer; -// -// // Create a graph -// boost::shared_ptr graph(new Graph); -// graph->addPrior(1, Pose2(0., 0., 0.), noiseModel::Isotropic::Sigma(3, 1e-10)); -// graph->addConstraint(1, 2, Pose2(1., 0., 0.), noiseModel::Isotropic::Sigma(3, 1)); -// -// // Create an initial config -// boost::shared_ptr config(new Values); -// config->insert(1, Pose2(0., 0., 0.)); -// config->insert(2, Pose2(1.5, 0., 0.)); -// -// // Create solver and optimizer -// Optimizer::shared_solver solver -// (new SubgraphSolver (*graph, *config)); -// Optimizer optimizer(graph, config, solver); -// -// // Optimize !!!! 
-// double relativeThreshold = 1e-5; -// double absoluteThreshold = 1e-5; -// Optimizer optimized = optimizer.gaussNewton(relativeThreshold, -// absoluteThreshold, Optimizer::SILENT); -// -// // Check solution -// Values expected; -// expected.insert(1, Pose2(0., 0., 0.)); -// expected.insert(2, Pose2(1., 0., 0.)); -// CHECK(assert_equal(expected, *optimized.values(), 1e-5)); -//} - -/* ************************************************************************* */ -// SL-FIX TEST( NonlinearOptimizer, MultiFrontalSolver ) -//{ -// shared_ptr fg(new example::Graph( -// example::createNonlinearFactorGraph())); -// Optimizer::shared_values initial = example::sharedNoisyValues(); -// -// Values expected; -// expected.insert(simulated2D::PoseKey(1), Point2(0.0, 0.0)); -// expected.insert(simulated2D::PoseKey(2), Point2(1.5, 0.0)); -// expected.insert(simulated2D::PointKey(1), Point2(0.0, -1.0)); -// -// Optimizer::shared_solver solver; -// -// // Check one ordering -// shared_ptr ord1(new Ordering()); -// *ord1 += kx(2),kl(1),kx(1); -// solver = Optimizer::shared_solver(new Optimizer::solver(ord1)); -// Optimizer optimizer1(fg, initial, solver); -// -// Values actual = optimizer1.levenbergMarquardt(); -// CHECK(assert_equal(actual,expected)); -//} - - /* ************************************************************************* */ int main() { TestResult tr; From ab4117090f24cd25ab913c9b7e4255817f3c970e Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 22 Mar 2012 18:05:36 +0000 Subject: [PATCH 12/57] Deleted old DoglegOptimizer placeholder --- gtsam/nonlinear/DoglegOptimizer-inl.h | 7 --- gtsam/nonlinear/DoglegOptimizer.h | 67 --------------------------- 2 files changed, 74 deletions(-) delete mode 100644 gtsam/nonlinear/DoglegOptimizer-inl.h delete mode 100644 gtsam/nonlinear/DoglegOptimizer.h diff --git a/gtsam/nonlinear/DoglegOptimizer-inl.h b/gtsam/nonlinear/DoglegOptimizer-inl.h deleted file mode 100644 index 432844101..000000000 --- a/gtsam/nonlinear/DoglegOptimizer-inl.h +++ /dev/null @@ -1,7 +0,0 @@ -/** - * @file DoglegOptimizer-inl.h - * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm - * @author Richard Roberts - */ - -#pragma once diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h deleted file mode 100644 index 62ff72b05..000000000 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * @file DoglegOptimizer.h - * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm - * @author Richard Roberts - */ - -#pragma once - -#include - -namespace gtsam { - -/** - * A class to perform nonlinear optimization using Powell's dogleg algorithm. - * This class is functional, meaning every method is \c const, and returns a new - * copy of the class. - * - * \tparam VALUES The Values or TupleValues type to hold the values to be - * estimated. - * - * \tparam GAUSSIAN_SOLVER The linear solver to use at each iteration, - * currently either GaussianSequentialSolver or GaussianMultifrontalSolver. - * The latter is typically faster, especially for non-trivial problems. 
- */ -template -class DoglegOptimizer { - -protected: - - typedef DoglegOptimizer This; ///< Typedef to this class - - const sharedGraph graph_; - const sharedValues values_; - const double error_; - -public: - - typedef VALUES ValuesType; ///< Typedef of the VALUES template parameter - typedef GAUSSIAN_SOLVER SolverType; ///< Typedef of the GAUSSIAN_SOLVER template parameter - typedef NonlinearFactorGraph GraphType; ///< A nonlinear factor graph templated on ValuesType - - typedef boost::shared_ptr sharedGraph; ///< A shared_ptr to GraphType - typedef boost::shared_ptr sharedValues; ///< A shared_ptr to ValuesType - - - /** - * Construct a DoglegOptimizer from the factor graph to optimize and the - * initial estimate of the variable values, using the default variable - * ordering method, currently COLAMD. - * @param graph The factor graph to optimize - * @param initialization An initial estimate of the variable values - */ - DoglegOptimizer(sharedGraph graph, sharedValues initialization); - - /** - * Construct a DoglegOptimizer from the factor graph to optimize and the - * initial estimate of the variable values, using the default variable - * ordering method, currently COLAMD. (this non-shared-pointer version - * incurs a performance hit for copying, see DoglegOptimizer(sharedGraph, sharedValues)). - * @param graph The factor graph to optimize - * @param initialization An initial estimate of the variable values - */ - DoglegOptimizer(const GraphType& graph, const ValuesType& initialization); - -}; - -} From b5e00efecff4f6c60c71c28edc2b0e7dd5cc2add Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 22 Mar 2012 19:47:29 +0000 Subject: [PATCH 13/57] Small cleanups --- gtsam/nonlinear/GaussNewtonOptimizer.h | 6 +++--- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index 0c85e35d1..ea64bfe44 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -90,7 +90,7 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, - const GaussNewtonParams& params, + const GaussNewtonParams& params = GaussNewtonParams(), const Ordering& ordering = Ordering()) : NonlinearOptimizer( SharedGraph(new NonlinearFactorGraph(graph)), @@ -110,7 +110,7 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, - const Ordering& ordering = Ordering()) : + const Ordering& ordering) : NonlinearOptimizer( SharedGraph(new NonlinearFactorGraph(graph)), SharedValues(new Values(values)), @@ -151,7 +151,7 @@ public: /** Update the graph, values, and/or parameters, leaving all other state * the same. Any of these that are empty shared pointers are left unchanged * in the returned optimizer object. Returns a new updated - * NonlinearOptimzier object, the original is not modified. + * NonlinearOptimizer object, the original is not modified. */ virtual auto_ptr update( const SharedGraph& newGraph, diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index e3a8331be..affc4b370 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -177,7 +177,7 @@ public: /** Update the graph, values, and/or parameters, leaving all other state * the same. 
Any of these that are empty shared pointers are left unchanged - * in the returned optimizer object. Returns a new updated NonlinearOptimzier + * in the returned optimizer object. Returns a new updated NonlinearOptimizer * object, the original is not modified. */ virtual auto_ptr update( From aedf55bad8349e823b0b7bd16d9414fa12b9212a Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:38:53 +0000 Subject: [PATCH 14/57] Comment fix --- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index affc4b370..4f665a7b3 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -53,7 +53,7 @@ public: Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) Factorization factorization; ///< The numerical factorization (default: LDL) - double lambdaInitial; ///< The initial (and current after each iteration) Levenberg-Marquardt damping term (default: 1e-5) + double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5) double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) LMVerbosity lmVerbosity; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity From be553be4f78ae36830f8143efc19eb5138002f34 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:38:55 +0000 Subject: [PATCH 15/57] Use GaussianBayesTree typedef --- gtsam/linear/GaussianMultifrontalSolver.cpp | 2 +- gtsam/linear/GaussianMultifrontalSolver.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/gtsam/linear/GaussianMultifrontalSolver.cpp b/gtsam/linear/GaussianMultifrontalSolver.cpp index 9e80b7c29..d849f71ed 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.cpp +++ b/gtsam/linear/GaussianMultifrontalSolver.cpp @@ -44,7 +44,7 @@ void GaussianMultifrontalSolver::replaceFactors(const FactorGraph::shared_ptr GaussianMultifrontalSolver::eliminate() const { +GaussianBayesTree::shared_ptr GaussianMultifrontalSolver::eliminate() const { if (useQR_) return Base::eliminate(&EliminateQR); else diff --git a/gtsam/linear/GaussianMultifrontalSolver.h b/gtsam/linear/GaussianMultifrontalSolver.h index 71a4c0c20..8177bed0b 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.h +++ b/gtsam/linear/GaussianMultifrontalSolver.h @@ -18,6 +18,7 @@ #pragma once #include +#include #include #include @@ -86,7 +87,7 @@ public: * Eliminate the factor graph sequentially. Uses a column elimination tree * to recursively eliminate. */ - BayesTree::shared_ptr eliminate() const; + GaussianBayesTree::shared_ptr eliminate() const; /** * Compute the least-squares solution of the GaussianFactorGraph. 
This From 0bd74426eae8f527a160565d069e97e87449fd54 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:38:57 +0000 Subject: [PATCH 16/57] Added DoglegOptimizer --- gtsam/nonlinear/DoglegOptimizer.cpp | 71 ++++++++ gtsam/nonlinear/DoglegOptimizer.h | 265 ++++++++++++++++++++++++++++ 2 files changed, 336 insertions(+) create mode 100644 gtsam/nonlinear/DoglegOptimizer.cpp create mode 100644 gtsam/nonlinear/DoglegOptimizer.h diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp new file mode 100644 index 000000000..9779fa36c --- /dev/null +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -0,0 +1,71 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DoglegOptimizer.cpp + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +NonlinearOptimizer::auto_ptr DoglegOptimizer::iterate() const { + + // Linearize graph + GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); + + // Check whether to use QR + bool useQR; + if(dlParams_->factorization == DoglegParams::LDL) + useQR = false; + else if(dlParams_->factorization == DoglegParams::QR) + useQR = true; + else + throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization"); + + // Pull out parameters we'll use + const bool dlVerbose = (dlParams_->dlVerbosity > DoglegParams::SILENT); + + // Do Dogleg iteration with either Multifrontal or Sequential elimination + DoglegOptimizerImpl::IterationResult result; + + if(dlParams_->elimination == DoglegParams::MULTIFRONTAL) { + GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate(); + result = DoglegOptimizerImpl::Iterate(delta_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, *values(), *ordering_, error(), dlVerbose); + + } else if(dlParams_->elimination == DoglegParams::SEQUENTIAL) { + GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate(); + result = DoglegOptimizerImpl::Iterate(delta_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, *values(), *ordering_, error(), dlVerbose); + + } else { + throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination"); + } + + // Update values + SharedValues newValues(new Values(values_->retract(result.dx_d, *ordering_))); + + // Create new optimizer with new values and new error + NonlinearOptimizer::auto_ptr newOptimizer(new DoglegOptimizer( + *this, newValues, result.f_error, result.Delta)); + + return newOptimizer; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h new file mode 100644 index 000000000..25abfda39 --- /dev/null +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -0,0 +1,265 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DoglegOptimizer.h + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#pragma once + +#include + +namespace gtsam { + +/** Parameters for Levenberg-Marquardt optimization. Note that this parameters + * class inherits from NonlinearOptimizerParams, which specifies the parameters + * common to all nonlinear optimization algorithms. This class also contains + * all of those parameters. + */ +class DoglegParams : public NonlinearOptimizerParams { +public: + /** See DoglegParams::elimination */ + enum Elimination { + MULTIFRONTAL, + SEQUENTIAL + }; + + /** See DoglegParams::factorization */ + enum Factorization { + LDL, + QR, + }; + + /** See DoglegParams::dlVerbosity */ + enum DLVerbosity { + SILENT, + VERBOSE + }; + + Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) + Factorization factorization; ///< The numerical factorization (default: LDL) + double deltaInitial; ///< The initial trust region radius (default: 1.0) + DLVerbosity dlVerbosity; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity + + DoglegParams() : + elimination(MULTIFRONTAL), factorization(LDL), deltaInitial(1.0), dlVerbosity(SILENT) {} + + virtual ~DoglegParams() {} + + virtual void print(const std::string& str = "") const { + NonlinearOptimizerParams::print(str); + if(elimination == MULTIFRONTAL) + std::cout << " elimination method: MULTIFRONTAL\n"; + else if(elimination == SEQUENTIAL) + std::cout << " elimination method: SEQUENTIAL\n"; + else + std::cout << " elimination method: (invalid)\n"; + + if(factorization == LDL) + std::cout << " factorization method: LDL\n"; + else if(factorization == QR) + std::cout << " factorization method: QR\n"; + else + std::cout << " factorization method: (invalid)\n"; + + std::cout << " deltaInitial: " << deltaInitial << "\n"; + + std::cout.flush(); + } +}; + +/** + * This class performs Dogleg nonlinear optimization + * TODO: use make_shared + */ +class DoglegOptimizer : public NonlinearOptimizer { + +public: + + typedef boost::shared_ptr SharedDLParams; + typedef boost::shared_ptr SharedOrdering; + + /// @name Standard interface + /// @{ + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& values, + const DoglegParams& params = DoglegParams(), + const Ordering& ordering = Ordering()) : + NonlinearOptimizer( + SharedGraph(new NonlinearFactorGraph(graph)), + SharedValues(new Values(values)), + SharedDLParams(new DoglegParams(params))), + dlParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(ordering.size() == 0), + ordering_(colamdOrdering_ ? + graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), + dimensions_(new vector(values_->dims(*ordering_))), + delta_(dlParams_->deltaInitial) {} + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. 
For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& values, + const Ordering& ordering) : + NonlinearOptimizer( + SharedGraph(new NonlinearFactorGraph(graph)), + SharedValues(new Values(values)), + SharedDLParams(new DoglegParams())), + dlParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(ordering.size() == 0), + ordering_(colamdOrdering_ ? + graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), + dimensions_(new vector(values_->dims(*ordering_))), + delta_(dlParams_->deltaInitial) {} + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + DoglegOptimizer(const SharedGraph& graph, const SharedValues& values, + const DoglegParams& params = DoglegParams(), + const SharedOrdering& ordering = SharedOrdering()) : + NonlinearOptimizer(graph, values, SharedDLParams(new DoglegParams(params))), + dlParams_(boost::static_pointer_cast(params_)), + colamdOrdering_(!ordering || ordering->size() == 0), + ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering), + dimensions_(new vector(values_->dims(*ordering_))), + delta_(dlParams_->deltaInitial) {} + + /// @} + + /// @name Advanced interface + /// @{ + + /** Virtual destructor */ + virtual ~DoglegOptimizer() {} + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). + */ + virtual auto_ptr iterate() const; + + /** Update the graph, values, and/or parameters, leaving all other state + * the same. Any of these that are empty shared pointers are left unchanged + * in the returned optimizer object. Returns a new updated NonlinearOptimizer + * object, the original is not modified. + */ + virtual auto_ptr update( + const SharedGraph& newGraph, + const SharedValues& newValues = SharedValues(), + const SharedParams& newParams = SharedParams()) const { + return auto_ptr(new DoglegOptimizer(*this, newGraph, newValues, + boost::dynamic_pointer_cast(newParams))); + } + + /** Update the ordering, leaving all other state the same. If newOrdering + * is an empty pointer or contains an empty Ordering object + * (with zero size), a COLAMD ordering will be computed. Returns a new + * NonlinearOptimizer object, the original is not modified. + */ + virtual auto_ptr update(const SharedOrdering& newOrdering) const { + return auto_ptr(new DoglegOptimizer(*this, newOrdering)); } + + /** Update the damping factor lambda, leaving all other state the same. + * Returns a new NonlinearOptimizer object, the original is not modified. 
+ */ + virtual auto_ptr update(double newDelta) const { + return auto_ptr(new DoglegOptimizer(*this, newDelta)); } + + /** Create a copy of the NonlinearOptimizer */ + virtual auto_ptr clone() const { + return auto_ptr(new DoglegOptimizer(*this)); + } + + /// @} + +protected: + + typedef boost::shared_ptr > SharedDimensions; + + const SharedDLParams dlParams_; + const bool colamdOrdering_; + const SharedOrdering ordering_; + const SharedDimensions dimensions_; + const double delta_; + + /** Protected constructor called by update() to modify the graph, values, or + * parameters. Computes a COLAMD ordering if the optimizer was originally + * constructed with an empty ordering, and if the graph is changing. + * Recomputes the dimensions if the values are changing. + */ + DoglegOptimizer(const DoglegOptimizer& original, const SharedGraph& newGraph, + const SharedValues& newValues, const SharedDLParams& newParams) : + NonlinearOptimizer(original, newGraph, newValues, newParams), + dlParams_(newParams ? newParams : original.dlParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(newGraph && colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : original.ordering_), + dimensions_(newValues ? + SharedDimensions(new std::vector(values_->dims(*ordering_))) : original.dimensions_), + delta_(original.delta_) {} + + /** Protected constructor called by update() to modify the ordering, computing + * a COLAMD ordering if the new ordering is empty, and also recomputing the + * dimensions. + */ + DoglegOptimizer( + const DoglegOptimizer& original, const SharedOrdering& newOrdering) : + NonlinearOptimizer(original), + dlParams_(original.dlParams_), + colamdOrdering_(!newOrdering || newOrdering->size() == 0), + ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : newOrdering), + dimensions_(new std::vector(values_->dims(*ordering_))), + delta_(original.delta_) {} + + /** Protected constructor called by update() to modify lambda. */ + DoglegOptimizer( + const DoglegOptimizer& original, double newDelta) : + NonlinearOptimizer(original), + dlParams_(original.dlParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(original.ordering_), + dimensions_(original.dimensions_), + delta_(newDelta) {} + +private: + + // Special constructor for completing an iteration, updates the values and + // error, and increments the iteration count. 
+ DoglegOptimizer(const DoglegOptimizer& original, + const SharedValues& newValues, double newError, double newDelta) : + NonlinearOptimizer(original.graph_, newValues, original.params_, newError, original.iterations_+1), + dlParams_(original.dlParams_), + colamdOrdering_(original.colamdOrdering_), + ordering_(original.ordering_), + dimensions_(original.dimensions_), + delta_(newDelta) {} +}; + +} From bcd663244ad68648cbb70191de9dbbddf6e2ed4d Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:38:58 +0000 Subject: [PATCH 17/57] Moved printing to NonlinearOptimizer base class --- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 4 ---- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 4 ---- gtsam/nonlinear/NonlinearOptimizer.cpp | 8 ++++++++ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index a603131ad..e3dd49461 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -54,10 +54,6 @@ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { SharedValues newValues(new Values(values_->retract(*delta, *ordering_))); double newError = graph_->error(*newValues); - // Maybe show output - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) newValues->print("newValues"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << newError << endl; - // Create new optimizer with new values and new error NonlinearOptimizer::auto_ptr newOptimizer(new GaussNewtonOptimizer( *this, newValues, newError)); diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 177c72c36..b8ba0b6a4 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -132,10 +132,6 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { } } // end while - // Maybe show output - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) next_values->print("newValues"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "error: " << next_error << endl; - // Create new optimizer with new values and new error NonlinearOptimizer::auto_ptr newOptimizer(new LevenbergMarquardtOptimizer( *this, next_values, next_error, lambda)); diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index ed5542df3..758216bb4 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -36,6 +36,10 @@ NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const { return this->clone(); } + // Maybe show output + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("newValues"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << this->error() << endl; + // Return if we already have too many iterations if(this->iterations() >= params_->maxIterations) return this->clone(); @@ -49,6 +53,10 @@ NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const { // Do next iteration currentError = next->error(); next = next->iterate(); + + // Maybe show output + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) next->values()->print("newValues"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error() << endl; } // Printing if verbose From 4205f7d32f8c656476614f7f53450e59f7134cf5 Mon Sep 17 00:00:00 
2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:39:00 +0000 Subject: [PATCH 18/57] Made constructors explicit --- gtsam/inference/BayesNet.h | 2 +- gtsam/inference/BayesTree.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/inference/BayesNet.h b/gtsam/inference/BayesNet.h index a05440d69..0f78a15e3 100644 --- a/gtsam/inference/BayesNet.h +++ b/gtsam/inference/BayesNet.h @@ -81,7 +81,7 @@ public: } /** BayesNet with 1 conditional */ - BayesNet(const sharedConditional& conditional) { push_back(conditional); } + explicit BayesNet(const sharedConditional& conditional) { push_back(conditional); } /// @} /// @name Testable diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h index 06a47ba8e..9cba86267 100644 --- a/gtsam/inference/BayesTree.h +++ b/gtsam/inference/BayesTree.h @@ -144,7 +144,7 @@ namespace gtsam { BayesTree() {} /** Create a Bayes Tree from a Bayes Net (requires CONDITIONAL is IndexConditional *or* CONDITIONAL::Combine) */ - BayesTree(const BayesNet& bayesNet); + explicit BayesTree(const BayesNet& bayesNet); /// @} /// @name Advanced Constructors From 4432916745bfa05589132ffdc047225fe3bd0a33 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:39:01 +0000 Subject: [PATCH 19/57] Put default absolute error tolerance back to 1e-5 --- gtsam/nonlinear/NonlinearOptimizer.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index dd81ced31..61edaf754 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -38,12 +38,12 @@ public: int maxIterations; ///< The maximum iterations to stop iterating (default 100) double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) - double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 0.0) + double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) double errorTol; ///< The maximum total error to stop iterating (default 0.0) Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) NonlinearOptimizerParams() : - maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(0.0), + maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), errorTol(0.0), verbosity(SILENT) {} virtual void print(const std::string& str = "") const { From 9bd9e24af8e56d0fb722fb633038485b4c98cb05 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:39:02 +0000 Subject: [PATCH 20/57] Fixes in printing --- gtsam/nonlinear/NonlinearOptimizer.cpp | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 758216bb4..42e33d4a8 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -37,8 +37,8 @@ NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const { } // Maybe show output - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("newValues"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << this->error() << endl; + if (params_->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("Initial values"); + if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << this->error() << endl; // Return if we already have too many 
iterations if(this->iterations() >= params_->maxIterations) @@ -47,16 +47,17 @@ NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const { // Iterative loop auto_ptr next = this->iterate(); // First iteration happens here while(next->iterations() < params_->maxIterations && - !checkConvergence(params_->relativeErrorTol, params_->absoluteErrorTol, + !checkConvergence(params_->relativeErrorTol, params_->absoluteErrorTol, params_->errorTol, currentError, next->error(), params_->verbosity)) { - // Do next iteration - currentError = next->error(); - next = next->iterate(); - // Maybe show output if (params_->verbosity >= NonlinearOptimizerParams::VALUES) next->values()->print("newValues"); if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error() << endl; + + + // Do next iteration + currentError = next->error(); + next = next->iterate(); } // Printing if verbose From 2df82aab8ff11c2461cc8a913b46e53a0f1118e4 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:39:04 +0000 Subject: [PATCH 21/57] Comment fixes in Pose3 --- gtsam/geometry/Pose3.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index 2d7e42363..793397201 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -217,10 +217,10 @@ namespace gtsam { /** * Calculate range to another pose - * @param point SO(3) pose of landmark + * @param pose Other SO(3) pose * @return range (double) */ - double range(const Pose3& point, + double range(const Pose3& pose, boost::optional H1=boost::none, boost::optional H2=boost::none) const; From a7f9c861ad8a549346e8998171b9907a4775944a Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:39:05 +0000 Subject: [PATCH 22/57] Added range functions in cameras (in branch, not tested yet) --- gtsam/geometry/CalibratedCamera.h | 32 +++++++++++- gtsam/geometry/PinholeCamera.h | 83 ++++++++++++++++++++++++++++--- 2 files changed, 107 insertions(+), 8 deletions(-) diff --git a/gtsam/geometry/CalibratedCamera.h b/gtsam/geometry/CalibratedCamera.h index 95ff66e68..04296eba1 100644 --- a/gtsam/geometry/CalibratedCamera.h +++ b/gtsam/geometry/CalibratedCamera.h @@ -120,7 +120,7 @@ namespace gtsam { /* ************************************************************************* */ /// @} - /// @name Transformations + /// @name Transformations and mesaurement functions /// @{ /** @@ -147,6 +147,36 @@ namespace gtsam { */ static Point3 backproject_from_camera(const Point2& p, const double scale); + /** + * Calculate range to a landmark + * @param point 3D location of landmark + * @return range (double) + */ + double range(const Point3& point, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + return pose_.range(point, H1, H2); } + + /** + * Calculate range to another pose + * @param pose Other SO(3) pose + * @return range (double) + */ + double range(const Pose3& pose, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + return pose_.range(pose, H1, H2); } + + /** + * Calculate range to another camera + * @param camera Other camera + * @return range (double) + */ + double range(const CalibratedCamera& camera, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + return pose_.range(camera.pose_, H1, H2); } + private: /// @} diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h index 8aee12064..1d9e9dcff 100644 --- a/gtsam/geometry/PinholeCamera.h +++ 
b/gtsam/geometry/PinholeCamera.h @@ -161,9 +161,9 @@ namespace gtsam { return PinholeCamera(pose3, K); } - /* ************************************************************************* */ - // measurement functions and derivatives - /* ************************************************************************* */ + /// @} + /// @name Transformations and measurement functions + /// @{ /** * projects a 3-dimensional point in camera coordinates into the @@ -186,10 +186,6 @@ namespace gtsam { return std::make_pair(k_.uncalibrate(pn), pc.z()>0); } - /// @} - /// @name Transformations - /// @{ - /** project a point from world coordinate to the image * @param pw is a point in the world coordinate * @param H1 is the jacobian w.r.t. pose3 @@ -270,6 +266,79 @@ namespace gtsam { return backproject(pi, scale); } + /** + * Calculate range to a landmark + * @param point 3D location of landmark + * @return range (double) + */ + double range(const Point3& point, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + double result = pose_.range(point, H1, H2); + if(H1) { + // Add columns of zeros to Jacobian for calibration + Matrix& H1r(*H1); + H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim()); + H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim()); + gtsam::print(*H1, "H1: "); + } + std::cout << "Range = " << result << std::endl; + return result; + } + + /** + * Calculate range to another pose + * @param pose Other SO(3) pose + * @return range (double) + */ + double range(const Pose3& pose, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + double result = pose_.range(pose, H1, H2); + if(H1) { + // Add columns of zeros to Jacobian for calibration + Matrix& H1r(*H1); + H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim()); + H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim()); + } + return result; + } + + /** + * Calculate range to another camera + * @param camera Other camera + * @return range (double) + */ + template + double range(const PinholeCamera& camera, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + double result = pose_.range(camera.pose_, H1, H2); + if(H1) { + // Add columns of zeros to Jacobian for calibration + Matrix& H1r(*H1); + H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim()); + H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim()); + } + if(H2) { + // Add columns of zeros to Jacobian for calibration + Matrix& H2r(*H2); + H2r.conservativeResize(Eigen::NoChange, camera.pose().dim() + camera.calibration().dim()); + H2r.block(0, camera.pose().dim(), 1, camera.calibration().dim()) = Matrix::Zero(1, camera.calibration().dim()); + } + return result; + } + + /** + * Calculate range to another camera + * @param camera Other camera + * @return range (double) + */ + double range(const CalibratedCamera& camera, + boost::optional H1=boost::none, + boost::optional H2=boost::none) const { + return pose_.range(camera.pose_, H1, H2); } + private: /// @} From cdd89a43f56ffdfd4648ac58e556fe5e649c16ca Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 03:43:28 +0000 Subject: [PATCH 23/57] DoglegOptimizer unit tests --- tests/testNonlinearOptimizer.cpp | 39 ++++++++++++++------------------ 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 5f228f3e7..cb97a6ae7 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ 
-31,10 +31,9 @@ using namespace boost; #include #include #include - -// template definitions #include #include +#include using namespace gtsam; @@ -100,6 +99,10 @@ TEST( NonlinearOptimizer, optimize ) // Levenberg-Marquardt NonlinearOptimizer::auto_ptr actual2 = LevenbergMarquardtOptimizer(fg, c0, LevenbergMarquardtParams(), ord).optimize(); DOUBLES_EQUAL(0,fg->error(*(actual2->values())),tol); + + // Dogleg + NonlinearOptimizer::auto_ptr actual3 = DoglegOptimizer(fg, c0, DoglegParams(), ord).optimize(); + DOUBLES_EQUAL(0,fg->error(*(actual3->values())),tol); } /* ************************************************************************* */ @@ -116,19 +119,6 @@ TEST( NonlinearOptimizer, SimpleLMOptimizer ) DOUBLES_EQUAL(0,fg->error(*actual),tol); } -/* ************************************************************************* */ -TEST( NonlinearOptimizer, SimpleLMOptimizer_noshared ) -{ - example::Graph fg = example::createReallyNonlinearFactorGraph(); - - Point2 x0(3,3); - Values c0; - c0.insert(simulated2D::PoseKey(1), x0); - - Values::const_shared_ptr actual = LevenbergMarquardtOptimizer(fg, c0).optimize()->values(); - DOUBLES_EQUAL(0,fg.error(*actual),tol); -} - /* ************************************************************************* */ TEST( NonlinearOptimizer, SimpleGNOptimizer ) { @@ -144,16 +134,17 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer ) } /* ************************************************************************* */ -TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared ) +TEST( NonlinearOptimizer, SimpleDLOptimizer ) { - example::Graph fg = example::createReallyNonlinearFactorGraph(); + shared_ptr fg(new example::Graph( + example::createReallyNonlinearFactorGraph())); - Point2 x0(3,3); - Values c0; - c0.insert(simulated2D::PoseKey(1), x0); + Point2 x0(3,3); + boost::shared_ptr c0(new Values); + c0->insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = GaussNewtonOptimizer(fg, c0).optimize()->values(); - DOUBLES_EQUAL(0,fg.error(*actual),tol); + Values::const_shared_ptr actual = DoglegOptimizer(fg, c0).optimize()->values(); + DOUBLES_EQUAL(0,fg->error(*actual),tol); } /* ************************************************************************* */ @@ -231,6 +222,10 @@ TEST(NonlinearOptimizer, NullFactor) { // Levenberg-Marquardt NonlinearOptimizer::auto_ptr actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize(); DOUBLES_EQUAL(0,fg.error(*actual2->values()),tol); + + // Dogleg + NonlinearOptimizer::auto_ptr actual3 = DoglegOptimizer(fg, c0, ord).optimize(); + DOUBLES_EQUAL(0,fg.error(*actual3->values()),tol); } /* ************************************************************************* */ From 829bb1f8aa4c917ed31459c3205233f61eae3298 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 23 Mar 2012 22:43:59 +0000 Subject: [PATCH 24/57] Added 'optimized' shortcut function to optimize and return Values directly --- gtsam/nonlinear/NonlinearOptimizer.h | 12 ++++++++++++ gtsam/slam/planarSLAM.h | 2 +- gtsam/slam/pose2SLAM.h | 2 +- gtsam/slam/tests/testPose3SLAM.cpp | 6 +++--- tests/testBoundingConstraint.cpp | 4 ++-- tests/testNonlinearEquality.cpp | 12 ++++++------ tests/testNonlinearOptimizer.cpp | 10 +++++----- tests/testRot3Optimization.cpp | 2 +- 8 files changed, 31 insertions(+), 19 deletions(-) diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 61edaf754..4dd81c8d9 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -124,9 +124,21 @@ public: /**
Optimize for the maximum-likelihood estimate, returning a new * NonlinearOptimizer class containing the optimized variable assignments, * which may be retrieved with values(). + * + * This function simply calls iterate() in a loop, checking for convergence + * with check_convergence(). For fine-grain control over the optimization + * process, you may call iterate() and check_convergence() yourself, and if + * needed modify the optimization state between iterations. */ virtual auto_ptr optimize() const { return defaultOptimize(); } + /** Shortcut to optimize and return the resulting Values of the maximum- + * likelihood estimate. To access statistics and information such as the + * final error and number of iterations, use optimize() instead. + * @return The maximum-likelihood estimate. + */ + virtual SharedValues optimized() const { return this->optimize()->values(); } + /** Retrieve the current variable assignment estimate. */ virtual const SharedValues& values() const { return values_; } diff --git a/gtsam/slam/planarSLAM.h b/gtsam/slam/planarSLAM.h index bd7de3a91..49463a4db 100644 --- a/gtsam/slam/planarSLAM.h +++ b/gtsam/slam/planarSLAM.h @@ -110,7 +110,7 @@ namespace planarSLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimize()->values(); + return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimized(); } }; diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index 6e448b4ee..7fb86a46a 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -97,7 +97,7 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimize()->values(); + return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimized(); } }; diff --git a/gtsam/slam/tests/testPose3SLAM.cpp b/gtsam/slam/tests/testPose3SLAM.cpp index 059e5b762..2c9562275 100644 --- a/gtsam/slam/tests/testPose3SLAM.cpp +++ b/gtsam/slam/tests/testPose3SLAM.cpp @@ -78,7 +78,7 @@ TEST(Pose3Graph, optimizeCircle) { Ordering ordering; ordering += kx0,kx1,kx2,kx3,kx4,kx5; - Values actual = *LevenbergMarquardtOptimizer(fg, initial, ordering).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(fg, initial, ordering).optimized(); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); @@ -113,7 +113,7 @@ TEST(Pose3Graph, partial_prior_height) { // linearization EXPECT_DOUBLES_EQUAL(2.0, height.error(values), tol); - Values actual = *LevenbergMarquardtOptimizer(graph, values).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, values).optimized(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } @@ -165,7 +165,7 @@ TEST(Pose3Graph, partial_prior_xy) { Values values; values.insert(key, init); - Values actual = *LevenbergMarquardtOptimizer(graph, values).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, values).optimized(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index 32495f4e7..c062484a6 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -154,7 +154,7 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { Values initValues; initValues.insert(x1, start_pt); - Values actual = *LevenbergMarquardtOptimizer(graph, 
initValues).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, actual, tol)); @@ -176,7 +176,7 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { Values initValues; initValues.insert(x1, start_pt); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, actual, tol)); diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index 39051b8c4..ce86e71bc 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -230,7 +230,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { // optimize Ordering ordering; ordering.push_back(key1); - Values actual = *LevenbergMarquardtOptimizer(graph, init, ordering).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, init, ordering).optimized(); // verify Values expected; @@ -317,7 +317,7 @@ TEST( testNonlinearEqualityConstraint, unary_simple_optimization ) { EXPECT(constraint->active(expected)); EXPECT_DOUBLES_EQUAL(0.0, constraint->error(expected), tol); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); EXPECT(assert_equal(expected, actual, tol)); } @@ -408,7 +408,7 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) { initValues.insert(key1, Point2()); initValues.insert(key2, badPt); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); Values expected; expected.insert(key1, truth_pt1); expected.insert(key2, truth_pt2); @@ -447,7 +447,7 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { initialEstimate.insert(l1, Point2(1.0, 6.0)); // ground truth initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame - Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimized(); Values expected; expected.insert(x1, pt_x1); @@ -491,7 +491,7 @@ TEST (testNonlinearEqualityConstraint, map_warp ) { initialEstimate.insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin // optimize - Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimized(); Values expected; expected.insert(x1, Point2(1.0, 1.0)); @@ -557,7 +557,7 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { initValues.insert(l2, landmark2); // optimize - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimize()->values(); + Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); // create config Values truthValues; diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index cb97a6ae7..4e35c0f77 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -115,7 +115,7 @@ TEST( NonlinearOptimizer, SimpleLMOptimizer ) boost::shared_ptr c0(new Values); c0->insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = LevenbergMarquardtOptimizer(fg, c0).optimize()->values(); + 
Values::const_shared_ptr actual = LevenbergMarquardtOptimizer(fg, c0).optimized(); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -129,7 +129,7 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer ) boost::shared_ptr c0(new Values); c0->insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = GaussNewtonOptimizer(fg, c0).optimize()->values(); + Values::const_shared_ptr actual = GaussNewtonOptimizer(fg, c0).optimized(); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -143,7 +143,7 @@ TEST( NonlinearOptimizer, SimpleDLOptimizer ) boost::shared_ptr c0(new Values); c0->insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = DoglegOptimizer(fg, c0).optimize()->values(); + Values::const_shared_ptr actual = DoglegOptimizer(fg, c0).optimized(); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -161,10 +161,10 @@ TEST( NonlinearOptimizer, optimization_method ) Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Values actualMFQR = *LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize()->values(); + Values actualMFQR = *LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimized(); DOUBLES_EQUAL(0,fg.error(actualMFQR),tol); - Values actualMFLDL = *LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize()->values(); + Values actualMFLDL = *LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimized(); DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol); } diff --git a/tests/testRot3Optimization.cpp b/tests/testRot3Optimization.cpp index ec7f1ddfa..1c90f4f48 100644 --- a/tests/testRot3Optimization.cpp +++ b/tests/testRot3Optimization.cpp @@ -47,7 +47,7 @@ TEST(Rot3, optimize) { fg.add(Between(Symbol('r',j), Symbol('r',(j+1)%6), Rot3::Rz(M_PI/3.0), sharedSigma(3, 0.01))); } - Values final = *GaussNewtonOptimizer(fg, initial).optimize()->values(); + Values final = *GaussNewtonOptimizer(fg, initial).optimized(); EXPECT(assert_equal(truth, final, 1e-5)); } From 31fe933877db35edd1e9242291acb7928dcd39db Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 19:53:07 +0000 Subject: [PATCH 25/57] Removed printing --- gtsam/geometry/PinholeCamera.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h index 1d9e9dcff..943409b4b 100644 --- a/gtsam/geometry/PinholeCamera.h +++ b/gtsam/geometry/PinholeCamera.h @@ -280,9 +280,7 @@ namespace gtsam { Matrix& H1r(*H1); H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim()); H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim()); - gtsam::print(*H1, "H1: "); } - std::cout << "Range = " << result << std::endl; return result; } From 7a24e1c940659491ebd9ef89caacebdc84654cdf Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 19:53:17 +0000 Subject: [PATCH 26/57] Updated examples for new NonlinearOptimizer --- examples/CameraResectioning.cpp | 4 +-- examples/PlanarSLAMExample_easy.cpp | 4 +-- examples/PlanarSLAMSelfContained_advanced.cpp | 33 ++++++++++--------- examples/Pose2SLAMExample_advanced.cpp | 16 ++++----- examples/Pose2SLAMExample_easy.cpp | 4 +-- examples/SimpleRotation.cpp | 4 +-- examples/vSLAMexample/vSFMexample.cpp | 9 ++--- 7 files changed, 38 insertions(+), 36 deletions(-) diff --git a/examples/CameraResectioning.cpp b/examples/CameraResectioning.cpp index f00509c3b..bbbb454f4 100644 --- a/examples/CameraResectioning.cpp +++ b/examples/CameraResectioning.cpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include using namespace gtsam; @@ -95,7 +95,7 @@ int main(int argc, char* argv[]) { 0.,0.,-1.), Point3(0.,0.,2.0))); /* 4. 
Optimize the graph using Levenberg-Marquardt*/ - Values result = optimize (graph, initial); + Values result = *LevenbergMarquardtOptimizer(graph, initial).optimized(); result.print("Final result: "); return 0; diff --git a/examples/PlanarSLAMExample_easy.cpp b/examples/PlanarSLAMExample_easy.cpp index 1ff3a8677..b660a1c16 100644 --- a/examples/PlanarSLAMExample_easy.cpp +++ b/examples/PlanarSLAMExample_easy.cpp @@ -20,7 +20,7 @@ // pull in the planar SLAM domain with all typedefs and helper functions defined #include -#include +#include using namespace std; using namespace gtsam; @@ -83,7 +83,7 @@ int main(int argc, char** argv) { initialEstimate.print("initial estimate"); // optimize using Levenberg-Marquardt optimization with an ordering from colamd - planarSLAM::Values result = optimize(graph, initialEstimate); + planarSLAM::Values result = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimized(); result.print("final result"); return 0; diff --git a/examples/PlanarSLAMSelfContained_advanced.cpp b/examples/PlanarSLAMSelfContained_advanced.cpp index 10802ee87..46f81d94d 100644 --- a/examples/PlanarSLAMSelfContained_advanced.cpp +++ b/examples/PlanarSLAMSelfContained_advanced.cpp @@ -35,17 +35,13 @@ // implementations for structures - needed if self-contained, and these should be included last #include -#include +#include #include #include using namespace std; using namespace gtsam; -// Main typedefs -typedef NonlinearOptimizer OptimizerSeqential; // optimization engine for this domain -typedef NonlinearOptimizer OptimizerMultifrontal; // optimization engine for this domain - /** * In this version of the system we make the following assumptions: * - All values are axis aligned @@ -117,22 +113,27 @@ int main(int argc, char** argv) { // optimize using Levenberg-Marquardt optimization with an ordering from colamd // first using sequential elimination - OptimizerSeqential::shared_values resultSequential = OptimizerSeqential::optimizeLM(*graph, *initial); + LevenbergMarquardtParams lmParams; + lmParams.elimination = LevenbergMarquardtParams::SEQUENTIAL; + Values::const_shared_ptr resultSequential = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimized(); resultSequential->print("final result (solved with a sequential solver)"); // then using multifrontal, advanced interface - // Note how we create an optimizer, call LM, then we get access to covariances - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial); - OptimizerMultifrontal optimizerMF(graph, initial, ordering); - OptimizerMultifrontal resultMF = optimizerMF.levenbergMarquardt(); - resultMF.values()->print("final result (solved with a multifrontal solver)"); + // Note that we keep the original optimizer object so we can use the COLAMD + // ordering it computes. 
+ LevenbergMarquardtOptimizer optimizer(graph, initial); + Values::const_shared_ptr resultMultifrontal = optimizer.optimized(); + resultMultifrontal->print("final result (solved with a multifrontal solver)"); + + const Ordering& ordering = *optimizer.ordering(); + GaussianMultifrontalSolver linearSolver(*graph->linearize(*resultMultifrontal, ordering)); // Print marginals covariances for all variables - print(resultMF.marginalCovariance(x1), "x1 covariance"); - print(resultMF.marginalCovariance(x2), "x2 covariance"); - print(resultMF.marginalCovariance(x3), "x3 covariance"); - print(resultMF.marginalCovariance(l1), "l1 covariance"); - print(resultMF.marginalCovariance(l2), "l2 covariance"); + print(linearSolver.marginalCovariance(ordering[x1]), "x1 covariance"); + print(linearSolver.marginalCovariance(ordering[x2]), "x2 covariance"); + print(linearSolver.marginalCovariance(ordering[x3]), "x3 covariance"); + print(linearSolver.marginalCovariance(ordering[l1]), "l1 covariance"); + print(linearSolver.marginalCovariance(ordering[l2]), "l2 covariance"); return 0; } diff --git a/examples/Pose2SLAMExample_advanced.cpp b/examples/Pose2SLAMExample_advanced.cpp index 1c196488f..c495dba97 100644 --- a/examples/Pose2SLAMExample_advanced.cpp +++ b/examples/Pose2SLAMExample_advanced.cpp @@ -22,7 +22,7 @@ // pull in the Pose2 SLAM domain with all typedefs and helper functions defined #include -#include +#include #include #include @@ -65,16 +65,16 @@ int main(int argc, char** argv) { Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial); /* 4.2.2 set up solver and optimize */ - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - Optimizer optimizer(graph, initial, ordering, params); - Optimizer optimizer_result = optimizer.levenbergMarquardt(); - - pose2SLAM::Values result = *optimizer_result.values(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + params.absoluteErrorTol = 1e-15; + pose2SLAM::Values result = *LevenbergMarquardtOptimizer(graph, initial, params, ordering).optimized(); result.print("final result"); /* Get covariances */ - Matrix covariance1 = optimizer_result.marginalCovariance(PoseKey(1)); - Matrix covariance2 = optimizer_result.marginalCovariance(PoseKey(1)); + GaussianMultifrontalSolver solver(*graph->linearize(result, *ordering)); + Matrix covariance1 = solver.marginalCovariance(ordering->at(PoseKey(1))); + Matrix covariance2 = solver.marginalCovariance(ordering->at(PoseKey(1))); print(covariance1, "Covariance1"); print(covariance2, "Covariance2"); diff --git a/examples/Pose2SLAMExample_easy.cpp b/examples/Pose2SLAMExample_easy.cpp index 2593c043c..4a4b7aa4f 100644 --- a/examples/Pose2SLAMExample_easy.cpp +++ b/examples/Pose2SLAMExample_easy.cpp @@ -24,7 +24,7 @@ // pull in the Pose2 SLAM domain with all typedefs and helper functions defined #include -#include +#include using namespace std; using namespace gtsam; @@ -61,7 +61,7 @@ int main(int argc, char** argv) { /* 4 Single Step Optimization * optimize using Levenberg-Marquardt optimization with an ordering from colamd */ - pose2SLAM::Values result = optimize(graph, initial); + pose2SLAM::Values result = *LevenbergMarquardtOptimizer(graph, initial).optimized(); result.print("final result"); diff --git a/examples/SimpleRotation.cpp b/examples/SimpleRotation.cpp index 71727f750..f3722da79 100644 --- a/examples/SimpleRotation.cpp +++ b/examples/SimpleRotation.cpp @@ -24,7 +24,7 @@ #include #include #include -#include +#include /* * 
TODO: make factors independent of RotValues @@ -105,7 +105,7 @@ int main() { * initial estimate. This will yield a new RotValues structure * with the final state of the optimization. */ - Values result = optimize(graph, initial); + Values result = *LevenbergMarquardtOptimizer(graph, initial).optimized(); result.print("final result"); return 0; diff --git a/examples/vSLAMexample/vSFMexample.cpp b/examples/vSLAMexample/vSFMexample.cpp index c7e0df05c..076430858 100644 --- a/examples/vSLAMexample/vSFMexample.cpp +++ b/examples/vSLAMexample/vSFMexample.cpp @@ -20,7 +20,7 @@ using namespace boost; #include -#include +#include #include #include @@ -143,12 +143,13 @@ int main(int argc, char* argv[]) { // Optimize the graph cout << "*******************************************************" << endl; - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newVerbosity(Optimizer::Parameters::DAMPED); - visualSLAM::Optimizer::shared_values result = visualSLAM::Optimizer::optimizeGN(graph, initialEstimates, params); + LevenbergMarquardtParams params; + params.lmVerbosity = LevenbergMarquardtParams::DAMPED; + visualSLAM::Values result = *LevenbergMarquardtOptimizer(graph, initialEstimates, params).optimized(); // Print final results cout << "*******************************************************" << endl; - result->print("FINAL RESULTS: "); + result.print("FINAL RESULTS: "); return 0; } From 5be3af8d6d18ee42ff83ec20f744ef57a771e1d2 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 19:53:25 +0000 Subject: [PATCH 27/57] Added ordering accessor --- gtsam/nonlinear/DoglegOptimizer.h | 3 +++ gtsam/nonlinear/GaussNewtonOptimizer.h | 3 +++ gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 3 +-- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 3 +++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 25abfda39..81081c177 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -152,6 +152,9 @@ public: dimensions_(new vector(values_->dims(*ordering_))), delta_(dlParams_->deltaInitial) {} + /** Access the variable ordering used by this optimizer */ + const SharedOrdering& ordering() const { return ordering_; } + /// @} /// @name Advanced interface diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index ea64bfe44..f9deaeded 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -134,6 +134,9 @@ public: colamdOrdering_(!ordering || ordering->size() == 0), ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering) {} + /** Access the variable ordering used by this optimizer */ + const SharedOrdering& ordering() const { return ordering_; } + /// @} /// @name Advanced interface diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index b8ba0b6a4..d05d23c3f 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -101,8 +101,7 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { next_error = error; lambda /= lambdaFactor; break; - } - else { + } else { // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. 
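The ordering() accessor added in this patch pairs naturally with the 'optimized' shortcut from PATCH 24; a minimal sketch of the intended usage, mirroring the updated PlanarSLAMSelfContained_advanced example above, follows. The names graph, initial, and x1 are placeholders (a shared NonlinearFactorGraph, the initial Values, and a pose key), not identifiers introduced by this patch.

// Sketch only: 'graph', 'initial', and 'x1' are placeholder names.
LevenbergMarquardtOptimizer optimizer(graph, initial);
Values::const_shared_ptr result = optimizer.optimized();

// Reuse the COLAMD ordering the optimizer computed to linearize at the solution
// and query marginal covariances.
const Ordering& ordering = *optimizer.ordering();
GaussianMultifrontalSolver solver(*graph->linearize(*result, ordering));
print(solver.marginalCovariance(ordering[x1]), "x1 covariance");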
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 4f665a7b3..73a6e8254 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -161,6 +161,9 @@ public: dimensions_(new vector(values_->dims(*ordering_))), lambda_(lmParams_->lambdaInitial) {} + /** Access the variable ordering used by this optimizer */ + const SharedOrdering& ordering() const { return ordering_; } + /// @} /// @name Advanced interface From daae77771b5230d638f516c3870a8f6ad45fa8fd Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 19:53:32 +0000 Subject: [PATCH 28/57] Changed nonlinear convergence to <= (from <) --- gtsam/nonlinear/NonlinearOptimizer.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 42e33d4a8..785ee8195 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -90,7 +90,7 @@ bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold // check if diverges double absoluteDecrease = currentError - newError; if (verbosity >= 2) { - if (absoluteDecrease < absoluteErrorTreshold) + if (absoluteDecrease <= absoluteErrorTreshold) cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " < " << absoluteErrorTreshold << endl; else cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " >= " << absoluteErrorTreshold << endl; @@ -99,13 +99,13 @@ bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold // calculate relative error decrease and update currentError double relativeDecrease = absoluteDecrease / currentError; if (verbosity >= 2) { - if (relativeDecrease < relativeErrorTreshold) + if (relativeDecrease <= relativeErrorTreshold) cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " < " << relativeErrorTreshold << endl; else cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " >= " << relativeErrorTreshold << endl; } - bool converged = (relativeErrorTreshold && (relativeDecrease < relativeErrorTreshold)) - || (absoluteDecrease < absoluteErrorTreshold); + bool converged = (relativeErrorTreshold && (relativeDecrease <= relativeErrorTreshold)) + || (absoluteDecrease <= absoluteErrorTreshold); if (verbosity >= 1 && converged) { if(absoluteDecrease >= 0.0) cout << "converged" << endl; From 5e6a1836e9509bb01b4b68f5fb1eb9efa8c8c22b Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 19:53:39 +0000 Subject: [PATCH 29/57] shared_ptr typedef --- gtsam/nonlinear/NonlinearOptimizer.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 4dd81c8d9..e1c722caf 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -109,6 +109,9 @@ public: /** An auto pointer to this class */ typedef std::auto_ptr auto_ptr; + /** A shared pointer to this class */ + typedef boost::shared_ptr shared_ptr; + /** A const shared_ptr to a NonlinearFactorGraph */ typedef boost::shared_ptr SharedGraph; From df1b9c32c14492c2107906bf2efb07cf1bdbb6b7 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 19:53:46 +0000 Subject: [PATCH 30/57] Constrain SFM scale with range prior --- gtsam/slam/tests/testGeneralSFMFactor.cpp | 6 ++++++ gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp | 6 ++++++ 2 
files changed, 12 insertions(+) diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index 402b6e17e..d24624d21 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -20,6 +20,7 @@ using namespace boost; #include #include #include +#include using namespace std; using namespace gtsam; @@ -345,7 +346,12 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { values.insert(Symbol('l',i), pt) ; } + // Constrain position of system with the first camera constrained to the origin graph.addCameraConstraint(0, X[0]); + + // Constrain the scale of the problem with a soft range factor of 1m between the cameras + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 10.0))); + const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); diff --git a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index ca698365a..044908e56 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -20,6 +20,7 @@ using namespace boost; #include #include #include +#include using namespace std; using namespace gtsam; @@ -343,7 +344,12 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { values.insert(Symbol('l',i), pt) ; } + // Constrain position of system with the first camera constrained to the origin graph.addCameraConstraint(0, X[0]); + + // Constrain the scale of the problem with a soft range factor of 1m between the cameras + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 10.0))); + const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); From aa7eb6306e996abc9405eb92f72486f6b5fb255b Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sat, 24 Mar 2012 20:49:35 +0000 Subject: [PATCH 31/57] Unit tests for camera range factors --- gtsam/slam/tests/testGeneralSFMFactor.cpp | 50 +++++++++++++++++++++++ gtsam/slam/tests/testVSLAM.cpp | 8 ++-- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index d24624d21..b4fb6e2af 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -21,6 +21,7 @@ using namespace boost; #include #include #include +#include using namespace std; using namespace gtsam; @@ -359,6 +360,55 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); } +/* ************************************************************************* */ +TEST(GeneralSFMFactor, GeneralCameraPoseRange) { + // Tests range factor between a GeneralCamera and a Pose3 + Graph graph; + graph.addCameraConstraint(0, GeneralCamera()); + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 1.0))); + graph.add(PriorFactor(Symbol('x',1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)), sharedSigma(6, 1.0))); + + Values init; + init.insert(Symbol('x',0), GeneralCamera()); + init.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.0,1.0,1.0))); + + // The optimal value between the 2m range factor and 1m prior is 1.5m + Values expected; + expected.insert(Symbol('x',0), GeneralCamera()); + expected.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.5,0.0,0.0))); + + LevenbergMarquardtParams params; + params.absoluteErrorTol = 1e-9; + params.relativeErrorTol = 1e-9; + Values actual = *LevenbergMarquardtOptimizer(graph, init, params).optimized(); + + 
EXPECT(assert_equal(expected, actual, 1e-4)); +} + +/* ************************************************************************* */ +TEST(GeneralSFMFactor, CalibratedCameraPoseRange) { + // Tests range factor between a CalibratedCamera and a Pose3 + NonlinearFactorGraph graph; + graph.add(PriorFactor(Symbol('x',0), CalibratedCamera(), sharedSigma(6, 1.0))); + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 1.0))); + graph.add(PriorFactor(Symbol('x',1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)), sharedSigma(6, 1.0))); + + Values init; + init.insert(Symbol('x',0), CalibratedCamera()); + init.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.0,1.0,1.0))); + + Values expected; + expected.insert(Symbol('x',0), CalibratedCamera(Pose3(Rot3(), Point3(-0.333333333333, 0, 0)))); + expected.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.333333333333, 0, 0))); + + LevenbergMarquardtParams params; + params.absoluteErrorTol = 1e-9; + params.relativeErrorTol = 1e-9; + Values actual = *LevenbergMarquardtOptimizer(graph, init, params).optimized(); + + EXPECT(assert_equal(expected, actual, 1e-4)); +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testVSLAM.cpp b/gtsam/slam/tests/testVSLAM.cpp index a5eba9715..5f65b757d 100644 --- a/gtsam/slam/tests/testVSLAM.cpp +++ b/gtsam/slam/tests/testVSLAM.cpp @@ -23,7 +23,7 @@ using namespace boost; #include -#include +#include #include using namespace std; @@ -102,7 +102,7 @@ TEST( Graph, optimizeLM) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, ordering)); + NonlinearOptimizer::auto_ptr optimizer(new LevenbergMarquardtOptimizer(graph, initialEstimate, ordering)); DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started @@ -139,7 +139,7 @@ TEST( Graph, optimizeLM2) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate, ordering)); + NonlinearOptimizer::auto_ptr optimizer(new LevenbergMarquardtOptimizer(graph, initialEstimate, ordering)); DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started @@ -172,7 +172,7 @@ TEST( Graph, CHECK_ORDERING) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, initialEstimate)); + NonlinearOptimizer::auto_ptr optimizer(new LevenbergMarquardtOptimizer(graph, initialEstimate)); DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); // Iterate once, and the config should not have changed because we started From b07d2096c272b330fabb6135dc8b0c2d407457e2 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Wed, 28 Mar 2012 04:53:16 +0000 Subject: [PATCH 32/57] Improved NLO documentation --- gtsam/nonlinear/NonlinearOptimizer.h | 45 +++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index e1c722caf..89982c17c 100644 --- 
a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -64,26 +64,55 @@ public: * maximum-likelihood estimate of a NonlinearFactorGraph. * * To use a class derived from this interface, construct the class with a - * NonlinearFactorGraph and an initial variable assignment. Next, call the + * NonlinearFactorGraph and an initial Values variable assignment. Next, call the * optimize() method, which returns a new NonlinearOptimizer object containing * the optimized variable assignment. Call the values() method to retrieve the - * optimized estimate. + * optimized estimate. Alternatively, to take a shortcut, instead of calling + * optimize(), call optimized(), which performs full optimization and returns + * the resulting Values instead of the new optimizer. * * Note: This class is immutable, optimize() and iterate() return new * NonlinearOptimizer objects, so be sure to use the returned object and not * simply keep the unchanged original. * - * Example: + * Simple and compact example: * \code -NonlinearOptimizer::auto_ptr optimizer = DoglegOptimizer::Create(graph, initialValues); -optimizer = optimizer->optimizer(); -Values result = optimizer->values(); +// One-liner to do full optimization and use the result. +// Note use of "optimized()" to directly return Values, instead of "optimize()" that returns a new optimizer. +Values::const_shared_ptr result = DoglegOptimizer(graph, initialValues).optimized(); +\endcode + * + * Example exposing more functionality and details: + * \code +// Create initial optimizer +DoglegOptimizer initial(graph, initialValues); + +// Run full optimization until convergence. +// Note use of "optimize()" to return a new optimizer, instead of "optimized()" that returns only the Values. +// NonlinearOptimizer pointers are always returned, though they are actually a derived optimizer type. 
+NonlinearOptimizer::auto_ptr final = initial.optimize(); + +// The new optimizer has results and statistics +cout << "Converged in " << final->iterations() << " iterations " + "with final error " << final->error() << endl; + +// The values are a const_shared_ptr (boost::shared_ptr) +Values::const_shared_ptr result = final->values(); + +// Use the results useTheResult(result); \endcode * - * Equivalent, but more compact: + * Example of setting parameters before optimization: * \code -useTheResult(DoglegOptimizer(graph, initialValues).optimize()->values()); +// Each derived optimizer type has its own parameters class, which inherits from NonlinearOptimizerParams +DoglegParams params; +params.factorization = DoglegParams::QR; +params.relativeErrorTol = 1e-3; +params.absoluteErrorTol = 1e-3; + +// Optimize +Values::const_shared_ptr result = DoglegOptimizer(graph, initialValues, params).optimized(); \endcode * * This interface also exposes an iterate() method, which performs one From fdb4138d109790270b081d56b80bc13d4f731a98 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Sun, 1 Apr 2012 20:22:24 +0000 Subject: [PATCH 33/57] Started NLO state --- .project | 2 +- gtsam/nonlinear/NonlinearOptimizer.h | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/.project b/.project index f59d79cb7..9856df2ea 100644 --- a/.project +++ b/.project @@ -23,7 +23,7 @@ org.eclipse.cdt.make.core.buildArguments - -j7 + -j5 org.eclipse.cdt.make.core.buildCommand diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 89982c17c..361b1bd69 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -59,6 +59,27 @@ public: }; +/** + * Base class for a nonlinear optimization state, including the current estimate + * of the variable values, error, and number of iterations. Optimizers derived + * from NonlinearOptimizer usually also define a derived state class containing + * additional state specific to the algorithm (for example, Dogleg state + * contains the current trust region radius). + */ +class NonlinearOptimizerState { +public: + + /** The current estimate of the variable values. */ + Values values; + + /** The factor graph error on the current values. */ + double error; + + /** The number of optimization iterations performed. */ + unsigned int iterations; +}; + + /** * This is the abstract interface for classes that can optimize for the * maximum-likelihood estimate of a NonlinearFactorGraph. From 9366136c782a8bc85eec51f6e7a0146fad87bc36 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 2 Apr 2012 00:26:42 +0000 Subject: [PATCH 34/57] In progress - making NLO interface less confusing / error prone with a separate "state" class. Refactoring in NLO to reduce amount of code and remove code duplication.
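The direction of this refactoring, judging from the declarations that follow, is that the optimizer object stays immutable while a separate state object carries the values, error, and iteration count between calls to iterate(). A rough sketch of the intended call pattern is below; how the first state gets constructed is exactly the part still in progress, so the initialState() helper is hypothetical and not provided by this patch.

// Hypothetical usage of the in-progress state interface; initialState() is assumed, not real.
DoglegOptimizer optimizer(graph, DoglegParams());
DoglegOptimizer::SharedState state = optimizer.initialState(initialValues); // hypothetical helper
while(state->iterations < optimizer.params()->maxIterations /* && !checkConvergence(...) */)
  state = boost::static_pointer_cast<DoglegState>(optimizer.iterate(state));
const Values& finalValues = state->values; // the current estimate now lives in the state
double finalError = state->error;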
--- gtsam/nonlinear/DirectOptimizer.cpp | 58 ++++++ gtsam/nonlinear/DirectOptimizer.h | 99 +++++++++ gtsam/nonlinear/DoglegOptimizer.cpp | 15 +- gtsam/nonlinear/DoglegOptimizer.h | 195 ++++-------------- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 13 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 190 ++++------------- gtsam/nonlinear/NonlinearOptimizer.cpp | 65 +++--- gtsam/nonlinear/NonlinearOptimizer.h | 86 +++----- 8 files changed, 314 insertions(+), 407 deletions(-) create mode 100644 gtsam/nonlinear/DirectOptimizer.cpp create mode 100644 gtsam/nonlinear/DirectOptimizer.h diff --git a/gtsam/nonlinear/DirectOptimizer.cpp b/gtsam/nonlinear/DirectOptimizer.cpp new file mode 100644 index 000000000..424bf0396 --- /dev/null +++ b/gtsam/nonlinear/DirectOptimizer.cpp @@ -0,0 +1,58 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DirectOptimizer.cpp + * @brief + * @author Richard Roberts + * @date Apr 1, 2012 + */ + +#include + +namespace gtsam { + +/* ************************************************************************* */ +NonlinearOptimizer::shared_ptr DirectOptimizer::update(const SharedGraph& newGraph) const { + + // Call update on the base class + shared_ptr result = boost::static_pointer_cast(); + + // Need to recompute the ordering if we're using an automatic COLAMD ordering + if(result->colamdOrdering_) { + result->ordering_ = result->graph_->orderingCOLAMD(*result->values_); + } + + return result; +} + +/* ************************************************************************* */ +DirectOptimizer::shared_ptr DirectOptimizer::update(const SharedOrdering& newOrdering) const { + return update(graph_, newOrdering); +} + +/* ************************************************************************* */ +DirectOptimizer::shared_ptr DirectOptimizer::update(const SharedGraph& newGraph, const SharedOrdering& newOrdering) const { + // Call update on base class + shared_ptr result = boost::static_pointer_cast(NonlinearOptimizer::update(newGraph)); + + if(newOrdering && newOrdering->size() > 0) { + result->colamdOrdering_ = false; + result->ordering_ = newOrdering; + } else { + result->colamdOrdering_ = true; + result->ordering_ = result->graph_->orderingCOLAMD(*result->values_); + } + + return result; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/DirectOptimizer.h b/gtsam/nonlinear/DirectOptimizer.h new file mode 100644 index 000000000..dac5a0004 --- /dev/null +++ b/gtsam/nonlinear/DirectOptimizer.h @@ -0,0 +1,99 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DirectOptimizer.h + * @brief + * @author Richard Roberts + * @date Apr 1, 2012 + */ + +#pragma once + +#include + +namespace gtsam { + +class DirectOptimizerParams : public NonlinearOptimizerParams { +public: + /** See DoglegParams::elimination */ + enum Elimination { + MULTIFRONTAL, + SEQUENTIAL + }; + + /** See DoglegParams::factorization */ + enum Factorization { + LDL, + QR, + }; + + Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) + Factorization factorization; ///< The numerical factorization (default: LDL) + + DirectOptimizerParams() : + elimination(MULTIFRONTAL), factorization(LDL) {} + + virtual ~DirectOptimizerParams() {} + + virtual void print(const std::string& str = "") const { + NonlinearOptimizerParams::print(str); + if(elimination == MULTIFRONTAL) + std::cout << " elimination method: MULTIFRONTAL\n"; + else if(elimination == SEQUENTIAL) + std::cout << " elimination method: SEQUENTIAL\n"; + else + std::cout << " elimination method: (invalid)\n"; + + if(factorization == LDL) + std::cout << " factorization method: LDL\n"; + else if(factorization == QR) + std::cout << " factorization method: QR\n"; + else + std::cout << " factorization method: (invalid)\n"; + + std::cout.flush(); + } +}; + +/** + * This is an intermediate base class for NonlinearOptimizers that use direct + * solving, i.e. factorization. This class is here to reduce code duplication + * for storing the ordering, having factorization and elimination parameters, + * etc. + */ +class DirectOptimizer : public NonlinearOptimizer { +public: + typedef boost::shared_ptr SharedOrdering; + typedef boost::shared_ptr shared_ptr; + + virtual NonlinearOptimizer::shared_ptr update(const SharedGraph& newGraph) const; + + virtual shared_ptr update(const SharedOrdering& newOrdering) const; + + virtual shared_ptr update(const SharedGraph& newGraph, const SharedOrdering& newOrdering) const; + + /** Access the variable ordering used by this optimizer */ + const SharedOrdering& ordering() const { return ordering_; } + +protected: + + const bool colamdOrdering_; + const SharedOrdering ordering_; + + DirectOptimizer(const SharedGraph& graph, + const SharedOrdering ordering = SharedOrdering()) : + NonlinearOptimizer(graph), + colamdOrdering_(!ordering || ordering->size() == 0), + ordering_(colamdOrdering_ ? 
graph_->orderingCOLAMD(*values_) : ordering) {} +}; + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 9779fa36c..1a490b744 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -26,7 +26,7 @@ using namespace std; namespace gtsam { -NonlinearOptimizer::auto_ptr DoglegOptimizer::iterate() const { +NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const SharedState& current) const { // Linearize graph GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); @@ -59,13 +59,16 @@ NonlinearOptimizer::auto_ptr DoglegOptimizer::iterate() const { } // Update values - SharedValues newValues(new Values(values_->retract(result.dx_d, *ordering_))); + SharedValues newValues = boost::make_shared(values_->retract(result.dx_d, *ordering_)); - // Create new optimizer with new values and new error - NonlinearOptimizer::auto_ptr newOptimizer(new DoglegOptimizer( - *this, newValues, result.f_error, result.Delta)); + // Create new state with new values and new error + DoglegOptimizer::SharedState newState = boost::make_shared(); + newState->values = newValues; + newState->error = result.f_error; + newState->iterations = current->iterations + 1; + newState->Delta = result.Delta; - return newOptimizer; + return newState; } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 81081c177..89c88edb0 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -18,7 +18,7 @@ #pragma once -#include +#include namespace gtsam { @@ -27,68 +27,49 @@ namespace gtsam { * common to all nonlinear optimization algorithms. This class also contains * all of those parameters.
*/ -class DoglegParams : public NonlinearOptimizerParams { +class DoglegParams : public DirectOptimizerParams { public: - /** See DoglegParams::elimination */ - enum Elimination { - MULTIFRONTAL, - SEQUENTIAL - }; - - /** See DoglegParams::factorization */ - enum Factorization { - LDL, - QR, - }; - /** See DoglegParams::dlVerbosity */ enum DLVerbosity { SILENT, VERBOSE }; - Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) - Factorization factorization; ///< The numerical factorization (default: LDL) double deltaInitial; ///< The initial trust region radius (default: 1.0) DLVerbosity dlVerbosity; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity DoglegParams() : - elimination(MULTIFRONTAL), factorization(LDL), deltaInitial(1.0), dlVerbosity(SILENT) {} + deltaInitial(1.0), dlVerbosity(SILENT) {} virtual ~DoglegParams() {} virtual void print(const std::string& str = "") const { - NonlinearOptimizerParams::print(str); - if(elimination == MULTIFRONTAL) - std::cout << " elimination method: MULTIFRONTAL\n"; - else if(elimination == SEQUENTIAL) - std::cout << " elimination method: SEQUENTIAL\n"; - else - std::cout << " elimination method: (invalid)\n"; - - if(factorization == LDL) - std::cout << " factorization method: LDL\n"; - else if(factorization == QR) - std::cout << " factorization method: QR\n"; - else - std::cout << " factorization method: (invalid)\n"; - + DirectOptimizerParams::print(str); std::cout << " deltaInitial: " << deltaInitial << "\n"; - std::cout.flush(); } }; /** - * This class performs Dogleg nonlinear optimization - * TODO: use make_shared + * State for DoglegOptimizer */ -class DoglegOptimizer : public NonlinearOptimizer { +class DoglegState : public NonlinearOptimizerState { +public: + + double Delta; + +}; + +/** + * This class performs Dogleg nonlinear optimization + */ +class DoglegOptimizer : public DirectOptimizer { public: - typedef boost::shared_ptr SharedDLParams; - typedef boost::shared_ptr SharedOrdering; + typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedState; + typedef boost::shared_ptr shared_ptr; /// @name Standard interface /// @{ @@ -101,19 +82,11 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& values, + DoglegOptimizer(const NonlinearFactorGraph& graph, const DoglegParams& params = DoglegParams(), const Ordering& ordering = Ordering()) : - NonlinearOptimizer( - SharedGraph(new NonlinearFactorGraph(graph)), - SharedValues(new Values(values)), - SharedDLParams(new DoglegParams(params))), - dlParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(ordering.size() == 0), - ordering_(colamdOrdering_ ? - graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), - dimensions_(new vector(values_->dims(*ordering_))), - delta_(dlParams_->deltaInitial) {} + DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + params_(new DoglegParams(params)) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. 
For convenience this @@ -123,18 +96,10 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& values, + DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : - NonlinearOptimizer( - SharedGraph(new NonlinearFactorGraph(graph)), - SharedValues(new Values(values)), - SharedDLParams(new DoglegParams())), - dlParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(ordering.size() == 0), - ordering_(colamdOrdering_ ? - graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), - dimensions_(new vector(values_->dims(*ordering_))), - delta_(dlParams_->deltaInitial) {} + DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + params_(new DoglegParams()) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -142,18 +107,17 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - DoglegOptimizer(const SharedGraph& graph, const SharedValues& values, + DoglegOptimizer(const SharedGraph& graph, const DoglegParams& params = DoglegParams(), const SharedOrdering& ordering = SharedOrdering()) : - NonlinearOptimizer(graph, values, SharedDLParams(new DoglegParams(params))), - dlParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(!ordering || ordering->size() == 0), - ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering), - dimensions_(new vector(values_->dims(*ordering_))), - delta_(dlParams_->deltaInitial) {} + DirectOptimizer(graph), + params_(new DoglegParams(params)) {} - /** Access the variable ordering used by this optimizer */ - const SharedOrdering& ordering() const { return ordering_; } + /** Access the parameters */ + virtual const NonlinearOptimizer::SharedParams& params() const { return params_; } + + /** Access the parameters */ + const DoglegOptimizer::SharedParams& params() const { return params_; } /// @} @@ -167,102 +131,19 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual auto_ptr iterate() const; - - /** Update the graph, values, and/or parameters, leaving all other state - * the same. Any of these that are empty shared pointers are left unchanged - * in the returned optimizer object. Returns a new updated NonlinearOptimizer - * object, the original is not modified. - */ - virtual auto_ptr update( - const SharedGraph& newGraph, - const SharedValues& newValues = SharedValues(), - const SharedParams& newParams = SharedParams()) const { - return auto_ptr(new DoglegOptimizer(*this, newGraph, newValues, - boost::dynamic_pointer_cast(newParams))); - } - - /** Update the ordering, leaving all other state the same. If newOrdering - * is an empty pointer or contains an empty Ordering object - * (with zero size), a COLAMD ordering will be computed. Returns a new - * NonlinearOptimizer object, the original is not modified. - */ - virtual auto_ptr update(const SharedOrdering& newOrdering) const { - return auto_ptr(new DoglegOptimizer(*this, newOrdering)); } - - /** Update the damping factor lambda, leaving all other state the same. - * Returns a new NonlinearOptimizer object, the original is not modified. 
- */ - virtual auto_ptr update(double newDelta) const { - return auto_ptr(new DoglegOptimizer(*this, newDelta)); } + virtual NonlinearOptimizer::SharedState iterate(const SharedState& current) const; /** Create a copy of the NonlinearOptimizer */ - virtual auto_ptr clone() const { - return auto_ptr(new DoglegOptimizer(*this)); - } + virtual NonlinearOptimizer::shared_ptr clone() const { + return boost::make_shared(*this); } /// @} protected: - typedef boost::shared_ptr > SharedDimensions; + const SharedParams params_; - const SharedDLParams dlParams_; - const bool colamdOrdering_; - const SharedOrdering ordering_; - const SharedDimensions dimensions_; - const double delta_; - - /** Protected constructor called by update() to modify the graph, values, or - * parameters. Computes a COLAMD ordering if the optimizer was originally - * constructed with an empty ordering, and if the graph is changing. - * Recomputes the dimensions if the values are changing. - */ - DoglegOptimizer(const DoglegOptimizer& original, const SharedGraph& newGraph, - const SharedValues& newValues, const SharedDLParams& newParams) : - NonlinearOptimizer(original, newGraph, newValues, newParams), - dlParams_(newParams ? newParams : original.dlParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(newGraph && colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : original.ordering_), - dimensions_(newValues ? - SharedDimensions(new std::vector(values_->dims(*ordering_))) : original.dimensions_), - delta_(original.delta_) {} - - /** Protected constructor called by update() to modify the ordering, computing - * a COLAMD ordering if the new ordering is empty, and also recomputing the - * dimensions. - */ - DoglegOptimizer( - const DoglegOptimizer& original, const SharedOrdering& newOrdering) : - NonlinearOptimizer(original), - dlParams_(original.dlParams_), - colamdOrdering_(!newOrdering || newOrdering->size() == 0), - ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : newOrdering), - dimensions_(new std::vector(values_->dims(*ordering_))), - delta_(original.delta_) {} - - /** Protected constructor called by update() to modify lambda. */ - DoglegOptimizer( - const DoglegOptimizer& original, double newDelta) : - NonlinearOptimizer(original), - dlParams_(original.dlParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(original.ordering_), - dimensions_(original.dimensions_), - delta_(newDelta) {} - -private: - - // Special constructor for completing an iteration, updates the values and - // error, and increments the iteration count. 
- DoglegOptimizer(const DoglegOptimizer& original, - const SharedValues& newValues, double newError, double newDelta) : - NonlinearOptimizer(original.graph_, newValues, original.params_, newError, original.iterations_+1), - dlParams_(original.dlParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(original.ordering_), - dimensions_(original.dimensions_), - delta_(newDelta) {} + virtual void setParams(const NonlinearOptimizer::SharedParams& newParams) { params_ = newParams; } }; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index d05d23c3f..b90af8348 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -26,7 +26,7 @@ using namespace std; namespace gtsam { -NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { +NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate(const SharedState& current) const { // Linearize graph GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); @@ -131,11 +131,14 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate() const { } } // end while - // Create new optimizer with new values and new error - NonlinearOptimizer::auto_ptr newOptimizer(new LevenbergMarquardtOptimizer( - *this, next_values, next_error, lambda)); + // Create new state with new values and new error + LevenbergMarquardtOptimizer::SharedState newState = boost::make_shared(); + newState->values = next_values; + newState->error = next_error; + newState->iterations = current->iterations + 1; + newState->Delta = lambda; - return newOptimizer; + return newState; } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 73a6e8254..4ccb330f2 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -18,7 +18,7 @@ #pragma once -#include +#include namespace gtsam { @@ -27,20 +27,8 @@ namespace gtsam { * common to all nonlinear optimization algorithms. This class also contains * all of those parameters. 
*/ -class LevenbergMarquardtParams : public NonlinearOptimizerParams { +class LevenbergMarquardtParams : public DirectOptimizerParams { public: - /** See LevenbergMarquardtParams::elimination */ - enum Elimination { - MULTIFRONTAL, - SEQUENTIAL - }; - - /** See LevenbergMarquardtParams::factorization */ - enum Factorization { - LDL, - QR, - }; - /** See LevenbergMarquardtParams::lmVerbosity */ enum LMVerbosity { SILENT, @@ -51,53 +39,46 @@ public: DAMPED }; - Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) - Factorization factorization; ///< The numerical factorization (default: LDL) double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5) double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) LMVerbosity lmVerbosity; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity LevenbergMarquardtParams() : - elimination(MULTIFRONTAL), factorization(LDL), lambdaInitial(1e-5), - lambdaFactor(10.0), lambdaUpperBound(1e5), lmVerbosity(SILENT) {} + lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lmVerbosity(SILENT) {} virtual ~LevenbergMarquardtParams() {} virtual void print(const std::string& str = "") const { - NonlinearOptimizerParams::print(str); - if(elimination == MULTIFRONTAL) - std::cout << " elimination method: MULTIFRONTAL\n"; - else if(elimination == SEQUENTIAL) - std::cout << " elimination method: SEQUENTIAL\n"; - else - std::cout << " elimination method: (invalid)\n"; - - if(factorization == LDL) - std::cout << " factorization method: LDL\n"; - else if(factorization == QR) - std::cout << " factorization method: QR\n"; - else - std::cout << " factorization method: (invalid)\n"; - + DirectOptimizerParams::print(str); std::cout << " lambdaInitial: " << lambdaInitial << "\n"; std::cout << " lambdaFactor: " << lambdaFactor << "\n"; std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n"; - std::cout.flush(); } }; +/** + * State for LevenbergMarquardtOptimizer + */ +class LevenbergMarquardtState : public NonlinearOptimizerState { +public: + + double lambda; + +}; + /** * This class performs Levenberg-Marquardt nonlinear optimization * TODO: use make_shared */ -class LevenbergMarquardtOptimizer : public NonlinearOptimizer { +class LevenbergMarquardtOptimizer : public DirectOptimizer { public: - typedef boost::shared_ptr SharedLMParams; - typedef boost::shared_ptr SharedOrdering; + typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedState; + typedef boost::shared_ptr shared_ptr; /// @name Standard interface /// @{ @@ -110,19 +91,11 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& values, + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), const Ordering& ordering = Ordering()) : - NonlinearOptimizer( - SharedGraph(new NonlinearFactorGraph(graph)), - SharedValues(new Values(values)), - SharedLMParams(new LevenbergMarquardtParams(params))), - lmParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(ordering.size() == 0), - ordering_(colamdOrdering_ ? 
- graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), - dimensions_(new vector(values_->dims(*ordering_))), - lambda_(lmParams_->lambdaInitial) {} + DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + params_(new LevenbergMarquardtParams(params)) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -132,18 +105,10 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& values, + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : - NonlinearOptimizer( - SharedGraph(new NonlinearFactorGraph(graph)), - SharedValues(new Values(values)), - SharedLMParams(new LevenbergMarquardtParams())), - lmParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(ordering.size() == 0), - ordering_(colamdOrdering_ ? - graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))), - dimensions_(new vector(values_->dims(*ordering_))), - lambda_(lmParams_->lambdaInitial) {} + DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + params_(new LevenbergMarquardtParams()) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -151,18 +116,17 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - LevenbergMarquardtOptimizer(const SharedGraph& graph, const SharedValues& values, + LevenbergMarquardtOptimizer(const SharedGraph& graph, const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), const SharedOrdering& ordering = SharedOrdering()) : - NonlinearOptimizer(graph, values, SharedLMParams(new LevenbergMarquardtParams(params))), - lmParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(!ordering || ordering->size() == 0), - ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering), - dimensions_(new vector(values_->dims(*ordering_))), - lambda_(lmParams_->lambdaInitial) {} + DirectOptimizer(graph), + params_(new LevenbergMarquardtParams(params)) {} - /** Access the variable ordering used by this optimizer */ - const SharedOrdering& ordering() const { return ordering_; } + /** Access the parameters */ + virtual const NonlinearOptimizer::SharedParams& params() const { return params_; } + + /** Access the parameters */ + const LevenbergMarquardtOptimizer::SharedParams& params() const { return params_; } /// @} @@ -176,39 +140,11 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual auto_ptr iterate() const; - - /** Update the graph, values, and/or parameters, leaving all other state - * the same. Any of these that are empty shared pointers are left unchanged - * in the returned optimizer object. Returns a new updated NonlinearOptimizer - * object, the original is not modified. - */ - virtual auto_ptr update( - const SharedGraph& newGraph, - const SharedValues& newValues = SharedValues(), - const SharedParams& newParams = SharedParams()) const { - return auto_ptr(new LevenbergMarquardtOptimizer(*this, newGraph, newValues, - boost::dynamic_pointer_cast(newParams))); - } - - /** Update the ordering, leaving all other state the same. If newOrdering - * is an empty pointer or contains an empty Ordering object - * (with zero size), a COLAMD ordering will be computed. 
Returns a new - * NonlinearOptimizer object, the original is not modified. - */ - virtual auto_ptr update(const SharedOrdering& newOrdering) const { - return auto_ptr(new LevenbergMarquardtOptimizer(*this, newOrdering)); } - - /** Update the damping factor lambda, leaving all other state the same. - * Returns a new NonlinearOptimizer object, the original is not modified. - */ - virtual auto_ptr update(double newLambda) const { - return auto_ptr(new LevenbergMarquardtOptimizer(*this, newLambda)); } + virtual NonlinearOptimizer::SharedState iterate(const SharedState& current) const; /** Create a copy of the NonlinearOptimizer */ - virtual auto_ptr clone() const { - return auto_ptr(new LevenbergMarquardtOptimizer(*this)); - } + virtual NonlinearOptimizer::shared_ptr clone() const { + return boost::make_shared(*this); } /// @} @@ -216,62 +152,8 @@ protected: typedef boost::shared_ptr > SharedDimensions; - const SharedLMParams lmParams_; - const bool colamdOrdering_; - const SharedOrdering ordering_; + const SharedParams params_; const SharedDimensions dimensions_; - const double lambda_; - - /** Protected constructor called by update() to modify the graph, values, or - * parameters. Computes a COLAMD ordering if the optimizer was originally - * constructed with an empty ordering, and if the graph is changing. - * Recomputes the dimensions if the values are changing. - */ - LevenbergMarquardtOptimizer(const LevenbergMarquardtOptimizer& original, const SharedGraph& newGraph, - const SharedValues& newValues, const SharedLMParams& newParams) : - NonlinearOptimizer(original, newGraph, newValues, newParams), - lmParams_(newParams ? newParams : original.lmParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(newGraph && colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : original.ordering_), - dimensions_(newValues ? - SharedDimensions(new std::vector(values_->dims(*ordering_))) : original.dimensions_), - lambda_(original.lambda_) {} - - /** Protected constructor called by update() to modify the ordering, computing - * a COLAMD ordering if the new ordering is empty, and also recomputing the - * dimensions. - */ - LevenbergMarquardtOptimizer( - const LevenbergMarquardtOptimizer& original, const SharedOrdering& newOrdering) : - NonlinearOptimizer(original), - lmParams_(original.lmParams_), - colamdOrdering_(!newOrdering || newOrdering->size() == 0), - ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : newOrdering), - dimensions_(new std::vector(values_->dims(*ordering_))), - lambda_(original.lambda_) {} - - /** Protected constructor called by update() to modify lambda. */ - LevenbergMarquardtOptimizer( - const LevenbergMarquardtOptimizer& original, double newLambda) : - NonlinearOptimizer(original), - lmParams_(original.lmParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(original.ordering_), - dimensions_(original.dimensions_), - lambda_(newLambda) {} - -private: - - // Special constructor for completing an iteration, updates the values and - // error, and increments the iteration count. 
- LevenbergMarquardtOptimizer(const LevenbergMarquardtOptimizer& original, - const SharedValues& newValues, double newError, double newLambda) : - NonlinearOptimizer(original.graph_, newValues, original.params_, newError, original.iterations_+1), - lmParams_(original.lmParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(original.ordering_), - dimensions_(original.dimensions_), - lambda_(newLambda) {} }; } diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 785ee8195..dd91563f3 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -25,49 +25,58 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::auto_ptr NonlinearOptimizer::defaultOptimize() const { +NonlinearOptimizer::shared_ptr NonlinearOptimizer::update(const NonlinearOptimizer::SharedGraph& newGraph) const { + shared_ptr result(this->clone()); + result->graph_ = newGraph; + return result; +} - double currentError = this->error(); +/* ************************************************************************* */ +NonlinearOptimizer::shared_ptr NonlinearOptimizer::update(const NonlinearOptimizer::SharedParams& newParams) const { + shared_ptr result(this->clone()); + result->params_ = newParams; + return result; +} + +/* ************************************************************************* */ +NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const SharedState& initial) const { + + const SharedParams& params = this->params(); + double currentError = initial->error(); // check if we're already close enough - if(currentError <= params_->errorTol) { - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) - cout << "Exiting, as error = " << currentError << " < " << params_->errorTol << endl; - return this->clone(); + if(currentError <= params->errorTol) { + if (params->verbosity >= NonlinearOptimizerParams::ERROR) + cout << "Exiting, as error = " << currentError << " < " << params->errorTol << endl; + return initial; } // Maybe show output - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("Initial values"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << this->error() << endl; + if (params->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("Initial values"); + if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << this->error() << endl; // Return if we already have too many iterations - if(this->iterations() >= params_->maxIterations) - return this->clone(); + if(this->iterations() >= params->maxIterations) + return initial; // Iterative loop - auto_ptr next = this->iterate(); // First iteration happens here - while(next->iterations() < params_->maxIterations && - !checkConvergence(params_->relativeErrorTol, params_->absoluteErrorTol, - params_->errorTol, currentError, next->error(), params_->verbosity)) { + SharedState next = initial; + do { + // Do next iteration + currentError = next->error; + next = this->iterate(next); // Maybe show output - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) next->values()->print("newValues"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error() << endl; - - - // Do next iteration - currentError = next->error(); - next = next->iterate(); - } + if (params->verbosity >= NonlinearOptimizerParams::VALUES) 
next->values->print("newValues"); + if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl; + } while(next->iterations < params->maxIterations && + !checkConvergence(params->relativeErrorTol, params->absoluteErrorTol, + params->errorTol, currentError, next->error, params->verbosity)); // Printing if verbose - if (params_->verbosity >= NonlinearOptimizerParams::VALUES) - next->values()->print("final values"); - if (params_->verbosity >= NonlinearOptimizerParams::ERROR && - next->iterations() >= params_->maxIterations) + if (params->verbosity >= NonlinearOptimizerParams::ERROR && + next->iterations >= params->maxIterations) cout << "Terminating because reached maximum iterations" << endl; - if (params_->verbosity >= NonlinearOptimizerParams::ERROR) - cout << "final error: " << next->error() << endl; // Return optimizer from final iteration return next; diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 361b1bd69..52effe514 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -77,6 +77,9 @@ public: /** The number of optimization iterations performed. */ unsigned int iterations; + + /** Virtual destructor to enable RTTI */ + virtual ~NonlinearOptimizerState() {} }; @@ -156,21 +159,18 @@ class NonlinearOptimizer { public: - /** An auto pointer to this class */ - typedef std::auto_ptr auto_ptr; - /** A shared pointer to this class */ typedef boost::shared_ptr shared_ptr; /** A const shared_ptr to a NonlinearFactorGraph */ typedef boost::shared_ptr SharedGraph; - /** A const shared_ptr to a NonlinearFactorGraph */ - typedef boost::shared_ptr SharedValues; - /** A const shared_ptr to the parameters */ typedef boost::shared_ptr SharedParams; + /** A shared_ptr to an optimizer state */ + typedef boost::shared_ptr SharedState; + /// @name Standard interface /// @{ @@ -183,26 +183,17 @@ public: * process, you may call iterate() and check_convergence() yourself, and if * needed modify the optimization state between iterations. */ - virtual auto_ptr optimize() const { return defaultOptimize(); } + virtual SharedState optimize(const SharedState& initial) const { return defaultOptimize(initial); } /** Shortcut to optimize and return the resulting Values of the maximum- * likelihood estimate. To access statistics and information such as the * final error and number of iterations, use optimize() instead. * @return The maximum-likelihood estimate. */ - virtual SharedValues optimized() const { return this->optimize()->values(); } - - /** Retrieve the current variable assignment estimate. */ - virtual const SharedValues& values() const { return values_; } + virtual SharedValues optimized(const SharedState& initial) const { return this->optimize(initial)->values(); } /** Retrieve the parameters. */ - virtual const SharedParams& params() const { return params_; } - - /** Return the current factor graph error */ - virtual double error() const { return error_; } - - /** Return the number of iterations that have been performed */ - virtual int iterations() const { return iterations_; } + virtual const SharedParams& params() const = 0; /// @} @@ -216,60 +207,41 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual auto_ptr iterate() const = 0; + virtual SharedState iterate(const SharedState& current) const = 0; - /** Update the graph, values, and/or parameters, leaving all other state - * the same. 
Any of these that are empty shared pointers are left unchanged - * in the returned optimizer object. Returns a new updated - * NonlinearOptimzier object, the original is not modified. + /** Update the graph, leaving all other parts of the optimizer unchanged, + * returns a new updated NonlinearOptimzier object, the original is not + * modified. */ - virtual auto_ptr update( - const SharedGraph& newGraph = SharedGraph(), - const SharedValues& newValues = SharedValues(), - const SharedParams& newParams = SharedParams()) const = 0; + virtual shared_ptr update(const SharedGraph& newGraph) const; + + /** Update the parameters, leaving all other parts of the optimizer unchanged, + * returns a new updated NonlinearOptimzier object, the original is not + * modified. + */ + const shared_ptr update(const SharedParams& newParams) const; /** Create a copy of the NonlinearOptimizer */ - virtual auto_ptr clone() const = 0; + virtual shared_ptr clone() const = 0; /// @} protected: + const SharedGraph graph_; + /** A default implementation of the optimization loop, which calls iterate() * until checkConvergence returns true. */ - auto_ptr defaultOptimize() const; + SharedState defaultOptimize(const SharedState& initial) const; -protected: + /** Modify the parameters in-place (not for external use) */ + virtual void setParams(const NonlinearOptimizer::SharedParams& newParams); - const SharedGraph graph_; - const SharedValues values_; - const SharedParams params_; - const double error_; - const int iterations_; - - /** Constructor for initial construction of base classes, computes error and - * sets iterations to zero. + /** Constructor for initial construction of base classes. */ - NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedParams& params) : - graph_(graph), values_(values), params_(params), - error_(graph_->error(*values_)), iterations_(0) {} - - /** Constructor that specifies all parts of the state, used for updates */ - NonlinearOptimizer(const SharedGraph& graph, const SharedValues& values, - const SharedParams& params, double error, int iterations) : - graph_(graph), values_(values), params_(params), - error_(error), iterations_(iterations) {} - - /** Convenience constructor for modifying only some of the state. */ - NonlinearOptimizer(const NonlinearOptimizer& original, const SharedGraph& newGraph, - const SharedValues& newValues, const SharedParams& newParams) : - graph_(newGraph ? newGraph : original.graph_), - values_(newValues ? newValues : original.values_), - params_(newParams ? newParams : original.params_), - error_(newGraph || newValues ? graph_->error(*values_) : original.error_), - iterations_(original.iterations_) {} + NonlinearOptimizer(const SharedGraph& graph) : + graph_(graph) {} }; From d0211bb0316f005eeea547534bc19f26cd8071f0 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Wed, 4 Apr 2012 23:20:42 +0000 Subject: [PATCH 35/57] Simplifying code and minor restructuring to support SPCG - removed update functions so NLO is now completely immutable, moved ordering to parameters, added SuccessiveLinearizationOptimizer base class that will do direct and iterative linear solving, with derived classes for GN, LM, DL. This structure will allow us to later add a new NLO subclass for nonlinear gradient descent methods. 
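As a rough illustration of the call pattern after this restructuring (this sketch is not part of the diff below; `graph` and `initialEstimate` are assumed to already exist as a NonlinearFactorGraph and a Values, and it uses the constructors and the Values-taking optimized() overload introduced in this patch):

    // Minimal usage sketch under the restructured, immutable-optimizer API.
    // The optimizer now stores only the graph and parameters; values, error,
    // and iteration count live in a separate state object created from the
    // initial Values.
    LevenbergMarquardtParams params;
    params.lambdaInitial = 1e-5;                            // damping, default shown
    LevenbergMarquardtOptimizer optimizer(graph, params);   // graph + params only
    Values result = optimizer.optimized(initialEstimate);   // builds the initial state internally and iterates to convergence

The pose2SLAM change at the end of this patch follows the same pattern, passing the initial estimate to optimized() instead of storing it in the optimizer.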
--- .cproject | 2 +- .project | 2 +- gtsam/nonlinear/DirectOptimizer.cpp | 58 ------- gtsam/nonlinear/DoglegOptimizer.cpp | 38 +++-- gtsam/nonlinear/DoglegOptimizer.h | 39 ++--- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 8 + gtsam/nonlinear/GaussNewtonOptimizer.h | 152 +++--------------- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 11 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 34 ++-- gtsam/nonlinear/NonlinearOptimizer.cpp | 7 + gtsam/nonlinear/NonlinearOptimizer.h | 36 ++--- .../SuccessiveLinearizationOptimizer.cpp | 41 +++++ ...r.h => SuccessiveLinearizationOptimizer.h} | 82 +++++----- gtsam/slam/pose2SLAM.h | 2 +- 14 files changed, 210 insertions(+), 302 deletions(-) delete mode 100644 gtsam/nonlinear/DirectOptimizer.cpp create mode 100644 gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp rename gtsam/nonlinear/{DirectOptimizer.h => SuccessiveLinearizationOptimizer.h} (59%) diff --git a/.cproject b/.cproject index c6699da3e..e80acb39e 100644 --- a/.cproject +++ b/.cproject @@ -21,7 +21,7 @@ - + diff --git a/.project b/.project index 9856df2ea..e52e979df 100644 --- a/.project +++ b/.project @@ -23,7 +23,7 @@ org.eclipse.cdt.make.core.buildArguments - -j5 + -j8 org.eclipse.cdt.make.core.buildCommand diff --git a/gtsam/nonlinear/DirectOptimizer.cpp b/gtsam/nonlinear/DirectOptimizer.cpp deleted file mode 100644 index 424bf0396..000000000 --- a/gtsam/nonlinear/DirectOptimizer.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file DirectOptimizer.cpp - * @brief - * @author Richard Roberts - * @date Apr 1, 2012 - */ - -#include - -namespace gtsam { - -/* ************************************************************************* */ -NonlinearOptimizer::shared_ptr DirectOptimizer::update(const SharedGraph& newGraph) const { - - // Call update on the base class - shared_ptr result = boost::static_pointer_cast(); - - // Need to recompute the ordering if we're using an automatic COLAMD ordering - if(result->colamdOrdering_) { - result->ordering_ = result->graph_->orderingCOLAMD(*result->values_); - } - - return result; -} - -/* ************************************************************************* */ -DirectOptimizer::shared_ptr DirectOptimizer::update(const SharedOrdering& newOrdering) const { - return update(graph_, newOrdering); -} - -/* ************************************************************************* */ -DirectOptimizer::shared_ptr DirectOptimizer::update(const SharedGraph& newGraph, const SharedOrdering& newOrdering) const { - // Call update on base class - shared_ptr result = boost::static_pointer_cast(NonlinearOptimizer::update(newGraph)); - - if(newOrdering && newOrdering->size() > 0) { - result->colamdOrdering_ = false; - result->ordering_ = newOrdering; - } else { - result->colamdOrdering_ = true; - result->ordering_ = result->graph_->orderingCOLAMD(*result->values_); - } - - return result; -} - -} /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 1a490b744..9a7822c6f 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -26,49 +26,59 @@ using namespace std; namespace 
gtsam { -NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const SharedState& current) const { +/* ************************************************************************* */ +NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& current) const { // Linearize graph - GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); + const Ordering& ordering = *ordering(current->values); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(current->values, ordering); // Check whether to use QR bool useQR; - if(dlParams_->factorization == DoglegParams::LDL) + if(params_->factorization == DoglegParams::LDL) useQR = false; - else if(dlParams_->factorization == DoglegParams::QR) + else if(params_->factorization == DoglegParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization"); // Pull out parameters we'll use - const bool dlVerbose = (dlParams_->dlVerbosity > DoglegParams::SILENT); + const bool dlVerbose = (params_->dlVerbosity > DoglegParams::SILENT); // Do Dogleg iteration with either Multifrontal or Sequential elimination DoglegOptimizerImpl::IterationResult result; - if(dlParams_->elimination == DoglegParams::MULTIFRONTAL) { + if(params_->elimination == DoglegParams::MULTIFRONTAL) { GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(delta_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, *values(), *ordering_, error(), dlVerbose); + result = DoglegOptimizerImpl::Iterate(current->Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, *values(), ordering, error(), dlVerbose); - } else if(dlParams_->elimination == DoglegParams::SEQUENTIAL) { + } else if(params_->elimination == DoglegParams::SEQUENTIAL) { GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(delta_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, *values(), *ordering_, error(), dlVerbose); + result = DoglegOptimizerImpl::Iterate(current->Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, *values(), ordering, error(), dlVerbose); } else { throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination"); } - // Update values - SharedValues newValues = boost::make_shared(values_->retract(result.dx_d, *ordering_)); - // Create new state with new values and new error DoglegOptimizer::SharedState newState = boost::make_shared(); - newState->values = newValues; - newState->error = rsult.f_error; + + // Update values + newState->values = current->values.retract(result.dx_d, ordering); + + newState->error = result.f_error; newState->iterations = current->iterations + 1; newState->Delta = result.Delta; return newState; } +/* ************************************************************************* */ +NonlinearOptimizer::SharedState DoglegOptimizer::initialState(const Values& initialValues) const { + SharedState initial = boost::make_shared(); + defaultInitialState(initialValues); + initial->Delta = params_->deltaInitial; + return initial; +} + } /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 89c88edb0..0ef9da536 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -18,7 +18,7 @@ #pragma once -#include +#include namespace gtsam { @@ -27,7 +27,7 @@ namespace gtsam { * common to all 
nonlinear optimization algorithms. This class also contains * all of those parameters. */ -class DoglegParams : public DirectOptimizerParams { +class DoglegParams : public SuccessiveLinearizationParams { public: /** See DoglegParams::dlVerbosity */ enum DLVerbosity { @@ -44,7 +44,7 @@ public: virtual ~DoglegParams() {} virtual void print(const std::string& str = "") const { - DirectOptimizerParams::print(str); + SuccessiveLinearizationParams::print(str); std::cout << " deltaInitial: " << deltaInitial << "\n"; std::cout.flush(); } @@ -53,7 +53,7 @@ public: /** * State for DoglegOptimizer */ -class DoglegState : public NonlinearOptimizerState { +class DoglegState : public SuccessiveLinearizationState { public: double Delta; @@ -63,11 +63,11 @@ public: /** * This class performs Dogleg nonlinear optimization */ -class DoglegOptimizer : public DirectOptimizer { +class DoglegOptimizer : public SuccessiveLinearizationOptimizer { public: - typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedParams; typedef boost::shared_ptr SharedState; typedef boost::shared_ptr shared_ptr; @@ -85,7 +85,7 @@ public: DoglegOptimizer(const NonlinearFactorGraph& graph, const DoglegParams& params = DoglegParams(), const Ordering& ordering = Ordering()) : - DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new DoglegParams(params)) {} /** Standard constructor, requires a nonlinear factor graph, initial @@ -96,9 +96,8 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - DoglegOptimizer(const NonlinearFactorGraph& graph, - const Ordering& ordering) : - DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : + SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new DoglegParams()) {} /** Standard constructor, requires a nonlinear factor graph, initial @@ -110,14 +109,11 @@ public: DoglegOptimizer(const SharedGraph& graph, const DoglegParams& params = DoglegParams(), const SharedOrdering& ordering = SharedOrdering()) : - DirectOptimizer(graph), + SuccessiveLinearizationOptimizer(graph), params_(new DoglegParams(params)) {} /** Access the parameters */ - virtual const NonlinearOptimizer::SharedParams& params() const { return params_; } - - /** Access the parameters */ - const DoglegOptimizer::SharedParams& params() const { return params_; } + virtual NonlinearOptimizer::SharedParams params() const { return params_; } /// @} @@ -131,19 +127,18 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual NonlinearOptimizer::SharedState iterate(const SharedState& current) const; + virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const; - /** Create a copy of the NonlinearOptimizer */ - virtual NonlinearOptimizer::shared_ptr clone() const { - return boost::make_shared(*this); } + /** Create an initial state with the specified variable assignment values and + * all other default state. 
+ */ + virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const; /// @} protected: - const SharedParams params_; - - virtual void setParams(const NonlinearOptimizer::SharedParams& newParams) { params_ = newParams; } + SharedParams params_; }; } diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index e3dd49461..3a5a7aee0 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -24,6 +24,7 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { // Linearize graph @@ -61,4 +62,11 @@ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { return newOptimizer; } +/* ************************************************************************* */ +NonlinearOptimizer::SharedState GaussNewtonOptimizer::initialState(const Values& initialValues) const { + SharedState initial = boost::make_shared(); + defaultInitialState(*initial); + return initial; +} + } /* namespace gtsam */ diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index f9deaeded..8b1d4fbe6 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -18,65 +18,28 @@ #pragma once -#include +#include namespace gtsam { /** Parameters for Gauss-Newton optimization, inherits from * NonlinearOptimizationParams. */ -class GaussNewtonParams : public NonlinearOptimizerParams { -public: - /** See GaussNewtonParams::elimination */ - enum Elimination { - MULTIFRONTAL, - SEQUENTIAL - }; +class GaussNewtonParams : public SuccessiveLinearizationParams { +}; - /** See GaussNewtonParams::factorization */ - enum Factorization { - LDL, - QR, - }; - - Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) - Factorization factorization; ///< The numerical factorization (default: LDL) - - GaussNewtonParams() : - elimination(MULTIFRONTAL), factorization(LDL) {} - - ~GaussNewtonParams() {} - - virtual void print(const std::string& str = "") const { - NonlinearOptimizerParams::print(str); - if(elimination == MULTIFRONTAL) - std::cout << " elimination method: MULTIFRONTAL\n"; - else if(elimination == SEQUENTIAL) - std::cout << " elimination method: SEQUENTIAL\n"; - else - std::cout << " elimination method: (invalid)\n"; - - if(factorization == LDL) - std::cout << " factorization method: LDL\n"; - else if(factorization == QR) - std::cout << " factorization method: QR\n"; - else - std::cout << " factorization method: (invalid)\n"; - - std::cout.flush(); - } +class GaussNewtonState : public SuccessiveLinearizationState { }; /** * This class performs Gauss-Newton nonlinear optimization * TODO: use make_shared */ -class GaussNewtonOptimizer : public NonlinearOptimizer { +class GaussNewtonOptimizer : public SuccessiveLinearizationOptimizer { public: - typedef boost::shared_ptr SharedGNParams; - typedef boost::shared_ptr SharedOrdering; + typedef boost::shared_ptr SharedParams; /// @name Standard interface /// @{ @@ -89,17 +52,11 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, + GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const GaussNewtonParams& params = GaussNewtonParams(), const Ordering& ordering = Ordering()) : - NonlinearOptimizer( - 
SharedGraph(new NonlinearFactorGraph(graph)), - SharedValues(new Values(values)), - SharedGNParams(new GaussNewtonParams(params))), - gnParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(ordering.size() == 0), - ordering_(colamdOrdering_ ? - graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))) {} + SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + params_(new GaussNewtonParams(params)) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -109,16 +66,9 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& values, - const Ordering& ordering) : - NonlinearOptimizer( - SharedGraph(new NonlinearFactorGraph(graph)), - SharedValues(new Values(values)), - SharedGNParams(new GaussNewtonParams())), - gnParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(ordering.size() == 0), - ordering_(colamdOrdering_ ? - graph_->orderingCOLAMD(*values_) : Ordering::shared_ptr(new Ordering(ordering))) {} + GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : + SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + params_(new GaussNewtonParams()) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -126,16 +76,14 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - GaussNewtonOptimizer(const SharedGraph& graph, const SharedValues& values, + GaussNewtonOptimizer(const SharedGraph& graph, const GaussNewtonParams& params = GaussNewtonParams(), const SharedOrdering& ordering = SharedOrdering()) : - NonlinearOptimizer(graph, values, SharedGNParams(new GaussNewtonParams(params))), - gnParams_(boost::static_pointer_cast(params_)), - colamdOrdering_(!ordering || ordering->size() == 0), - ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : ordering) {} + SuccessiveLinearizationOptimizer(graph), + params_(new GaussNewtonParams(params)) {} - /** Access the variable ordering used by this optimizer */ - const SharedOrdering& ordering() const { return ordering_; } + /** Access the parameters */ + virtual NonlinearOptimizer::SharedParams params() const { return params_; } /// @} @@ -149,74 +97,18 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual auto_ptr iterate() const; + virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const; - /** Update the graph, values, and/or parameters, leaving all other state - * the same. Any of these that are empty shared pointers are left unchanged - * in the returned optimizer object. Returns a new updated - * NonlinearOptimizer object, the original is not modified. + /** Create an initial state with the specified variable assignment values and + * all other default state. */ - virtual auto_ptr update( - const SharedGraph& newGraph, - const SharedValues& newValues = SharedValues(), - const SharedParams& newParams = SharedParams()) const { - return auto_ptr(new GaussNewtonOptimizer(*this, newGraph, newValues, - boost::dynamic_pointer_cast(newParams))); - } - - /** Update the ordering, leaving all other state the same. 
If newOrdering - * is an empty pointer or contains an empty Ordering object - * (with zero size), a COLAMD ordering will be computed. Returns a new - * NonlinearOptimizer object, the original is not modified. - */ - virtual auto_ptr update(const SharedOrdering& newOrdering) const { - return auto_ptr(new GaussNewtonOptimizer(*this, newOrdering)); } - - /** Create a copy of the NonlinearOptimizer */ - virtual auto_ptr clone() const { - return auto_ptr(new GaussNewtonOptimizer(*this)); - } + virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const; /// @} protected: - const SharedGNParams gnParams_; - const bool colamdOrdering_; - const SharedOrdering ordering_; - - /** Protected constructor called by update() to modify the graph, values, or - * parameters. Computes a COLAMD ordering if the optimizer was originally - * constructed with an empty ordering, and if the graph is changing. - */ - GaussNewtonOptimizer(const GaussNewtonOptimizer& original, const SharedGraph& newGraph, - const SharedValues& newValues, const SharedGNParams& newParams) : - NonlinearOptimizer(original, newGraph, newValues, newParams), - gnParams_(newParams ? newParams : original.gnParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(newGraph && colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : original.ordering_) {} - - /** Protected constructor called by update() to modify the ordering, computing - * a COLAMD ordering if the new ordering is empty, and also recomputing the - * dimensions. - */ - GaussNewtonOptimizer( - const GaussNewtonOptimizer& original, const SharedOrdering& newOrdering) : - NonlinearOptimizer(original), - gnParams_(original.gnParams_), - colamdOrdering_(!newOrdering || newOrdering->size() == 0), - ordering_(colamdOrdering_ ? graph_->orderingCOLAMD(*values_) : newOrdering) {} - -private: - - // Special constructor for completing an iteration, updates the values and - // error, and increments the iteration count. 
- GaussNewtonOptimizer(const GaussNewtonOptimizer& original, - const SharedValues& newValues, double newError) : - NonlinearOptimizer(original.graph_, newValues, original.params_, newError, original.iterations_+1), - gnParams_(original.gnParams_), - colamdOrdering_(original.colamdOrdering_), - ordering_(original.ordering_) {} + const SharedParams params_; }; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index b90af8348..5657da572 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -26,7 +26,8 @@ using namespace std; namespace gtsam { -NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate(const SharedState& current) const { +/* ************************************************************************* */ +NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& current) const { // Linearize graph GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); @@ -141,4 +142,12 @@ NonlinearOptimizer::auto_ptr LevenbergMarquardtOptimizer::iterate(const SharedSt return newState; } +/* ************************************************************************* */ +NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::initialState(const Values& initialValues) const { + SharedState initial = boost::make_shared(); + defaultInitialState(*initial); + initial->lambda = params_->lambdaInitial; + return initial; +} + } /* namespace gtsam */ diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 4ccb330f2..058bf347b 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -18,7 +18,7 @@ #pragma once -#include +#include namespace gtsam { @@ -27,7 +27,7 @@ namespace gtsam { * common to all nonlinear optimization algorithms. This class also contains * all of those parameters. 
*/ -class LevenbergMarquardtParams : public DirectOptimizerParams { +class LevenbergMarquardtParams : public SuccessiveLinearizationParams { public: /** See LevenbergMarquardtParams::lmVerbosity */ enum LMVerbosity { @@ -50,7 +50,7 @@ public: virtual ~LevenbergMarquardtParams() {} virtual void print(const std::string& str = "") const { - DirectOptimizerParams::print(str); + SuccessiveLinearizationParams::print(str); std::cout << " lambdaInitial: " << lambdaInitial << "\n"; std::cout << " lambdaFactor: " << lambdaFactor << "\n"; std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n"; @@ -61,7 +61,7 @@ public: /** * State for LevenbergMarquardtOptimizer */ -class LevenbergMarquardtState : public NonlinearOptimizerState { +class LevenbergMarquardtState : public SuccessiveLinearizationState { public: double lambda; @@ -72,11 +72,11 @@ public: * This class performs Levenberg-Marquardt nonlinear optimization * TODO: use make_shared */ -class LevenbergMarquardtOptimizer : public DirectOptimizer { +class LevenbergMarquardtOptimizer : public SuccessiveLinearizationOptimizer { public: - typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedParams; typedef boost::shared_ptr SharedState; typedef boost::shared_ptr shared_ptr; @@ -94,7 +94,7 @@ public: LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), const Ordering& ordering = Ordering()) : - DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new LevenbergMarquardtParams(params)) {} /** Standard constructor, requires a nonlinear factor graph, initial @@ -107,7 +107,7 @@ public: */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : - DirectOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), + SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new LevenbergMarquardtParams()) {} /** Standard constructor, requires a nonlinear factor graph, initial @@ -119,14 +119,11 @@ public: LevenbergMarquardtOptimizer(const SharedGraph& graph, const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), const SharedOrdering& ordering = SharedOrdering()) : - DirectOptimizer(graph), + SuccessiveLinearizationOptimizer(graph), params_(new LevenbergMarquardtParams(params)) {} /** Access the parameters */ - virtual const NonlinearOptimizer::SharedParams& params() const { return params_; } - - /** Access the parameters */ - const LevenbergMarquardtOptimizer::SharedParams& params() const { return params_; } + virtual NonlinearOptimizer::SharedParams params() const { return params_; } /// @} @@ -140,11 +137,12 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual NonlinearOptimizer::SharedState iterate(const SharedState& current) const; + virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const; - /** Create a copy of the NonlinearOptimizer */ - virtual NonlinearOptimizer::shared_ptr clone() const { - return boost::make_shared(*this); } + /** Create an initial state with the specified variable assignment values and + * all other default state. 
+ */ + virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const; /// @} @@ -152,7 +150,7 @@ protected: typedef boost::shared_ptr > SharedDimensions; - const SharedParams params_; + SharedParams params_; const SharedDimensions dimensions_; }; diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index dd91563f3..cfe8a4945 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -82,6 +82,13 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared return next; } +/* ************************************************************************* */ +void NonlinearOptimizer::defaultInitialState(NonlinearOptimizerState& initial) const { + state.values = initialValues; + state.error = graph_->error(initialValues); + state.iterations = 0; +} + /* ************************************************************************* */ bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold, double errorThreshold, double currentError, double newError, diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 52effe514..c4f8be2fc 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -78,8 +78,11 @@ public: /** The number of optimization iterations performed. */ unsigned int iterations; - /** Virtual destructor to enable RTTI */ + /** Virtual destructor */ virtual ~NonlinearOptimizerState() {} + + /** Clone the state (i.e. make a copy of the derived class) */ + virtual boost::shared_ptr clone() = 0; }; @@ -185,15 +188,19 @@ public: */ virtual SharedState optimize(const SharedState& initial) const { return defaultOptimize(initial); } + SharedState optimize(const Values& initialization) const { return optimize(initialState(initialization)); } + /** Shortcut to optimize and return the resulting Values of the maximum- * likelihood estimate. To access statistics and information such as the * final error and number of iterations, use optimize() instead. * @return The maximum-likelihood estimate. */ - virtual SharedValues optimized(const SharedState& initial) const { return this->optimize(initial)->values(); } + virtual Values optimized(const SharedState& initial) const { return this->optimize(initial)->values; } + + Values optimized(const Values& initialization) const { return optimized(initialState(initialization)); } /** Retrieve the parameters. */ - virtual const SharedParams& params() const = 0; + virtual SharedParams params() const = 0; /// @} @@ -209,20 +216,10 @@ public: */ virtual SharedState iterate(const SharedState& current) const = 0; - /** Update the graph, leaving all other parts of the optimizer unchanged, - * returns a new updated NonlinearOptimzier object, the original is not - * modified. + /** Create an initial state from a variable assignment Values, with all + * other state values at their default initial values. */ - virtual shared_ptr update(const SharedGraph& newGraph) const; - - /** Update the parameters, leaving all other parts of the optimizer unchanged, - * returns a new updated NonlinearOptimzier object, the original is not - * modified. 
- */ - const shared_ptr update(const SharedParams& newParams) const; - - /** Create a copy of the NonlinearOptimizer */ - virtual shared_ptr clone() const = 0; + virtual SharedState initialState(const Values& initialValues) const = 0; /// @} @@ -235,13 +232,12 @@ protected: */ SharedState defaultOptimize(const SharedState& initial) const; - /** Modify the parameters in-place (not for external use) */ - virtual void setParams(const NonlinearOptimizer::SharedParams& newParams); + /** Initialize a state, using the current error and 0 iterations */ + void defaultInitialState(SharedState& initial) const; /** Constructor for initial construction of base classes. */ - NonlinearOptimizer(const SharedGraph& graph) : - graph_(graph) {} + NonlinearOptimizer(const SharedGraph& graph) : graph_(graph) {} }; diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp new file mode 100644 index 000000000..2303c203a --- /dev/null +++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp @@ -0,0 +1,41 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DirectOptimizer.cpp + * @brief + * @author Richard Roberts + * @date Apr 1, 2012 + */ + +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +const SuccessiveLinearizationOptimizer::SharedOrdering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const { + + if(!ordering_) { + SharedParams params = + boost::dynamic_pointer_cast(params()); + + // If we're using a COLAMD ordering, compute it + if(!params.ordering) + ordering_ = graph_->orderingCOLAMD(values); + else + ordering_ = params.ordering; + } + + return ordering_; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/DirectOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h similarity index 59% rename from gtsam/nonlinear/DirectOptimizer.h rename to gtsam/nonlinear/SuccessiveLinearizationOptimizer.h index dac5a0004..e8c74d1a8 100644 --- a/gtsam/nonlinear/DirectOptimizer.h +++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file DirectOptimizer.h + * @file SuccessiveLinearizationOptimizer.h * @brief * @author Richard Roberts * @date Apr 1, 2012 @@ -18,31 +18,65 @@ #pragma once +#include + #include namespace gtsam { -class DirectOptimizerParams : public NonlinearOptimizerParams { +/** + * This is an intermediate base class for NonlinearOptimizers that use direct + * solving, i.e. factorization. This class is here to reduce code duplication + * for storing the ordering, having factorization and elimination parameters, + * etc. 
+ */ +class SuccessiveLinearizationOptimizer : public NonlinearOptimizer { public: - /** See DoglegParams::elimination */ + + class SuccessiveLinearizationParams; + class SuccessiveLinearizationState; + + typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr shared_ptr; + +protected: + + typedef boost::shared_ptr SharedOrdering; + + SuccessiveLinearizationOptimizer(const SharedGraph& graph) : NonlinearOptimizer(graph) {} + + const SharedOrdering& ordering(const Values& values) const; + +private: + + SharedOrdering ordering_; +}; + +class SuccessiveLinearizationParams : public NonlinearOptimizerParams { +public: + /** See SuccessiveLinearizationParams::elimination */ enum Elimination { MULTIFRONTAL, SEQUENTIAL }; - /** See DoglegParams::factorization */ + /** See SuccessiveLinearizationParams::factorization */ enum Factorization { LDL, QR, }; + /** See SuccessiveLinearizationParams::ordering */ + typedef boost::shared_ptr SharedOrdering; + Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) Factorization factorization; ///< The numerical factorization (default: LDL) + SharedOrdering ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty) - DirectOptimizerParams() : + SuccessiveLinearizationParams() : elimination(MULTIFRONTAL), factorization(LDL) {} - virtual ~DirectOptimizerParams() {} + virtual ~SuccessiveLinearizationParams() {} virtual void print(const std::string& str = "") const { NonlinearOptimizerParams::print(str); @@ -60,40 +94,16 @@ public: else std::cout << " factorization method: (invalid)\n"; + if(ordering) + std::cout << " ordering: custom\n"; + else + std::cout << " ordering: COLAMD\n"; + std::cout.flush(); } }; -/** - * This is an intermediate base class for NonlinearOptimizers that use direct - * solving, i.e. factorization. This class is here to reduce code duplication - * for storing the ordering, having factorization and elimination parameters, - * etc. - */ -class DirectOptimizer : public NonlinearOptimizer { -public: - typedef boost::shared_ptr SharedOrdering; - typedef boost::shared_ptr shared_ptr; - - virtual NonlinearOptimizer::shared_ptr update(const SharedGraph& newGraph) const; - - virtual shared_ptr update(const SharedOrdering& newOrdering) const; - - virtual shared_ptr update(const SharedGraph& newGraph, const SharedOrdering& newOrdering) const; - - /** Access the variable ordering used by this optimizer */ - const SharedOrdering& ordering() const { return ordering_; } - -protected: - - const bool colamdOrdering_; - const SharedOrdering ordering_; - - DirectOptimizer(const SharedGraph& graph, - const SharedOrdering ordering = SharedOrdering()) : - NonlinearOptimizer(graph), - colamdOrdering_(!ordering || ordering->size() == 0), - ordering_(colamdOrdering_ ? 
graph_->orderingCOLAMD(*values_) : ordering) {} +class SuccessiveLinearizationState : public NonlinearOptimizerState { }; } /* namespace gtsam */ diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index 7fb86a46a..319dcb1c0 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -97,7 +97,7 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimized(); + return *LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); } }; From 7a41a1c7e409fd045904bdd7b961dad0c6307b6c Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 5 Apr 2012 02:45:44 +0000 Subject: [PATCH 36/57] Fixed warnings --- gtsam/linear/tests/timeSLAMlike.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/linear/tests/timeSLAMlike.cpp b/gtsam/linear/tests/timeSLAMlike.cpp index 24470f126..a02858def 100644 --- a/gtsam/linear/tests/timeSLAMlike.cpp +++ b/gtsam/linear/tests/timeSLAMlike.cpp @@ -41,7 +41,7 @@ int main(int argc, char *argv[]) { size_t vardim = 3; size_t blockdim = 3; - size_t nVars = 500; + int nVars = 500; size_t blocksPerVar = 5; size_t varsPerBlock = 2; size_t varSpread = 10; @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) { for(size_t trial=0; trial > terms; terms.reserve(varsPerBlock); if(c == 0 && d == 0) From 48e277a095eaa96041bfb413870f2aaf07066bba Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 5 Apr 2012 02:45:45 +0000 Subject: [PATCH 37/57] Added swap method to Values --- gtsam/nonlinear/Values.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index 57941ab27..b80348c01 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -393,6 +393,9 @@ namespace gtsam { /** Replace all keys and variables */ Values& operator=(const Values& rhs); + /** Swap the contents of two Values without copying data */ + void swap(Values& other) { values_.swap(other.values_); } + /** Remove all variables from the config */ void clear() { values_.clear(); } From c83a399bbaa266c74986f1f889d0f2f726fafcea Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 5 Apr 2012 02:45:47 +0000 Subject: [PATCH 38/57] Continuing code simplification / restructuring --- gtsam/nonlinear/DoglegOptimizer.cpp | 26 +++++----- gtsam/nonlinear/DoglegOptimizer.h | 11 ++--- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 31 ++++++------ gtsam/nonlinear/GaussNewtonOptimizer.h | 11 ++--- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 47 +++++++++++-------- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 16 +++---- gtsam/nonlinear/NonlinearOptimizer.cpp | 34 ++++---------- gtsam/nonlinear/NonlinearOptimizer.h | 7 +-- .../SuccessiveLinearizationOptimizer.cpp | 12 ++--- .../SuccessiveLinearizationOptimizer.h | 17 +++---- 10 files changed, 98 insertions(+), 114 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 9a7822c6f..f910c9661 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -27,11 +27,13 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& current) const { +NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const { + + const DoglegState& current = dynamic_cast(*_current); // Linearize graph - const 
Ordering& ordering = *ordering(current->values); - GaussianFactorGraph::shared_ptr linear = graph_->linearize(current->values, ordering); + const Ordering& ordering = this->ordering(current.values); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering); // Check whether to use QR bool useQR; @@ -50,24 +52,24 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize if(params_->elimination == DoglegParams::MULTIFRONTAL) { GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(current->Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, *values(), ordering, error(), dlVerbose); + result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, current.values, ordering, current.error, dlVerbose); } else if(params_->elimination == DoglegParams::SEQUENTIAL) { GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(current->Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, *values(), ordering, error(), dlVerbose); + result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, current.values, ordering, current.error, dlVerbose); } else { throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination"); } + // Maybe show output + if(params_->verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta"); + // Create new state with new values and new error - DoglegOptimizer::SharedState newState = boost::make_shared(); - - // Update values - newState->values = current->values.retract(result.dx_d, ordering); - + SharedState newState = boost::make_shared(); + newState->values = current.values.retract(result.dx_d, ordering); newState->error = result.f_error; - newState->iterations = current->iterations + 1; + newState->iterations = current.iterations + 1; newState->Delta = result.Delta; return newState; @@ -76,7 +78,7 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize /* ************************************************************************* */ NonlinearOptimizer::SharedState DoglegOptimizer::initialState(const Values& initialValues) const { SharedState initial = boost::make_shared(); - defaultInitialState(initialValues); + defaultInitialState(initialValues, *initial); initial->Delta = params_->deltaInitial; return initial; } diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 0ef9da536..badd021e1 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -67,7 +67,7 @@ class DoglegOptimizer : public SuccessiveLinearizationOptimizer { public: - typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedParams; typedef boost::shared_ptr SharedState; typedef boost::shared_ptr shared_ptr; @@ -83,8 +83,7 @@ public: * @param params The optimization parameters */ DoglegOptimizer(const NonlinearFactorGraph& graph, - const DoglegParams& params = DoglegParams(), - const Ordering& ordering = Ordering()) : + const DoglegParams& params = DoglegParams()) : SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new DoglegParams(params)) {} @@ -98,7 +97,8 @@ public: */ DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : SuccessiveLinearizationOptimizer(SharedGraph(new 
NonlinearFactorGraph(graph))), - params_(new DoglegParams()) {} + params_(new DoglegParams()) { + params_->ordering = ordering; } /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -107,8 +107,7 @@ public: * @param params The optimization parameters */ DoglegOptimizer(const SharedGraph& graph, - const DoglegParams& params = DoglegParams(), - const SharedOrdering& ordering = SharedOrdering()) : + const DoglegParams& params = DoglegParams()) : SuccessiveLinearizationOptimizer(graph), params_(new DoglegParams(params)) {} diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index 3a5a7aee0..8efc6aead 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -25,25 +25,28 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { +NonlinearOptimizer::SharedState GaussNewtonOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const { + + const GaussNewtonState& current = dynamic_cast(*_current); // Linearize graph - GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); + const Ordering& ordering = this->ordering(current.values); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering); // Check whether to use QR bool useQR; - if(gnParams_->factorization == GaussNewtonParams::LDL) + if(params_->factorization == GaussNewtonParams::LDL) useQR = false; - else if(gnParams_->factorization == GaussNewtonParams::QR) + else if(params_->factorization == GaussNewtonParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::factorization"); // Optimize VectorValues::shared_ptr delta; - if(gnParams_->elimination == GaussNewtonParams::MULTIFRONTAL) + if(params_->elimination == GaussNewtonParams::MULTIFRONTAL) delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); - else if(gnParams_->elimination == GaussNewtonParams::SEQUENTIAL) + else if(params_->elimination == GaussNewtonParams::SEQUENTIAL) delta = GaussianSequentialSolver(*linear, useQR).optimize(); else throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination"); @@ -51,21 +54,19 @@ NonlinearOptimizer::auto_ptr GaussNewtonOptimizer::iterate() const { // Maybe show output if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); - // Update values - SharedValues newValues(new Values(values_->retract(*delta, *ordering_))); - double newError = graph_->error(*newValues); + // Create new state with new values and new error + SharedState newState = boost::make_shared(); + newState->values = current.values.retract(*delta, ordering); + newState->error = graph_->error(newState->values); + newState->iterations = current.iterations + 1; - // Create new optimizer with new values and new error - NonlinearOptimizer::auto_ptr newOptimizer(new GaussNewtonOptimizer( - *this, newValues, newError)); - - return newOptimizer; + return newState; } /* ************************************************************************* */ NonlinearOptimizer::SharedState GaussNewtonOptimizer::initialState(const Values& initialValues) const { SharedState initial = boost::make_shared(); - defaultInitialState(*initial); + defaultInitialState(initialValues, *initial); return initial; } diff --git 
a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index 8b1d4fbe6..5e6266feb 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -39,7 +39,7 @@ class GaussNewtonOptimizer : public SuccessiveLinearizationOptimizer { public: - typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedParams; /// @name Standard interface /// @{ @@ -53,8 +53,7 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, - const GaussNewtonParams& params = GaussNewtonParams(), - const Ordering& ordering = Ordering()) : + const GaussNewtonParams& params = GaussNewtonParams()) : SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new GaussNewtonParams(params)) {} @@ -68,7 +67,8 @@ public: */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new GaussNewtonParams()) {} + params_(new GaussNewtonParams()) { + params_->ordering = ordering; } /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. @@ -77,8 +77,7 @@ public: * @param params The optimization parameters */ GaussNewtonOptimizer(const SharedGraph& graph, - const GaussNewtonParams& params = GaussNewtonParams(), - const SharedOrdering& ordering = SharedOrdering()) : + const GaussNewtonParams& params = GaussNewtonParams()) : SuccessiveLinearizationOptimizer(graph), params_(new GaussNewtonParams(params)) {} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 5657da572..0e641db29 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -27,29 +27,36 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& current) const { +NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const { + + const LevenbergMarquardtState& current = dynamic_cast(*_current); // Linearize graph - GaussianFactorGraph::shared_ptr linear = graph_->linearize(*values_, *ordering_); + const Ordering& ordering = this->ordering(current.values); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering); // Check whether to use QR bool useQR; - if(lmParams_->factorization == LevenbergMarquardtParams::LDL) + if(params_->factorization == LevenbergMarquardtParams::LDL) useQR = false; - else if(lmParams_->factorization == LevenbergMarquardtParams::QR) + else if(params_->factorization == LevenbergMarquardtParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization"); // Pull out parameters we'll use const NonlinearOptimizerParams::Verbosity nloVerbosity = params_->verbosity; - const LevenbergMarquardtParams::LMVerbosity lmVerbosity = lmParams_->lmVerbosity; - const double lambdaFactor = lmParams_->lambdaFactor; + const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_->lmVerbosity; + const double lambdaFactor = params_->lambdaFactor; // Variables to update during try_lambda loop - double lambda = lambda_; - double next_error = error(); - SharedValues next_values = 
values(); + double lambda = current.lambda; + double next_error = current.error; + Values next_values = current.values; + + // Compute dimensions if we haven't done it yet + if(!dimensions_) + dimensions_.reset(new vector(current.values.dims(ordering))); // Keep increasing lambda until we make make progress while(true) { @@ -79,9 +86,9 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli // Optimize VectorValues::shared_ptr delta; - if(lmParams_->elimination == LevenbergMarquardtParams::MULTIFRONTAL) + if(params_->elimination == LevenbergMarquardtParams::MULTIFRONTAL) delta = GaussianMultifrontalSolver(dampedSystem, useQR).optimize(); - else if(lmParams_->elimination == LevenbergMarquardtParams::SEQUENTIAL) + else if(params_->elimination == LevenbergMarquardtParams::SEQUENTIAL) delta = GaussianSequentialSolver(dampedSystem, useQR).optimize(); else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); @@ -90,15 +97,15 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta"); // update values - SharedValues newValues(new Values(values_->retract(*delta, *ordering_))); + Values newValues = current.values.retract(*delta, ordering); // create new optimization state with more adventurous lambda - double error = graph_->error(*newValues); + double error = graph_->error(newValues); if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl; - if( error <= error_ ) { - next_values = newValues; + if(error <= current.error) { + next_values.swap(newValues); next_error = error; lambda /= lambdaFactor; break; @@ -106,7 +113,7 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. - if(lambda >= lmParams_->lambdaUpperBound) { + if(lambda >= params_->lambdaUpperBound) { if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; @@ -120,7 +127,7 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. 
- if(lambda >= lmParams_->lambdaUpperBound) { + if(lambda >= params_->lambdaUpperBound) { if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; @@ -136,8 +143,8 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli LevenbergMarquardtOptimizer::SharedState newState = boost::make_shared(); newState->values = next_values; newState->error = next_error; - newState->iterations = current->iterations + 1; - newState->Delta = lambda; + newState->iterations = current.iterations + 1; + newState->lambda = lambda; return newState; } @@ -145,7 +152,7 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli /* ************************************************************************* */ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::initialState(const Values& initialValues) const { SharedState initial = boost::make_shared(); - defaultInitialState(*initial); + defaultInitialState(initialValues, *initial); initial->lambda = params_->lambdaInitial; return initial; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 058bf347b..7787825a9 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -76,7 +76,7 @@ class LevenbergMarquardtOptimizer : public SuccessiveLinearizationOptimizer { public: - typedef boost::shared_ptr SharedParams; + typedef boost::shared_ptr SharedParams; typedef boost::shared_ptr SharedState; typedef boost::shared_ptr shared_ptr; @@ -92,8 +92,7 @@ public: * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, - const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), - const Ordering& ordering = Ordering()) : + const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), params_(new LevenbergMarquardtParams(params)) {} @@ -105,10 +104,10 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, - const Ordering& ordering) : + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new LevenbergMarquardtParams()) {} + params_(new LevenbergMarquardtParams()) { + params_->ordering = ordering; } /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. 
@@ -117,8 +116,7 @@ public: * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const SharedGraph& graph, - const LevenbergMarquardtParams& params = LevenbergMarquardtParams(), - const SharedOrdering& ordering = SharedOrdering()) : + const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : SuccessiveLinearizationOptimizer(graph), params_(new LevenbergMarquardtParams(params)) {} @@ -151,7 +149,7 @@ protected: typedef boost::shared_ptr > SharedDimensions; SharedParams params_; - const SharedDimensions dimensions_; + mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it }; } diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index cfe8a4945..ef40aeb78 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -24,25 +24,11 @@ using namespace std; namespace gtsam { -/* ************************************************************************* */ -NonlinearOptimizer::shared_ptr NonlinearOptimizer::update(const NonlinearOptimizer::SharedGraph& newGraph) const { - shared_ptr result(this->clone()); - result->graph_ = newGraph; - return result; -} - -/* ************************************************************************* */ -NonlinearOptimizer::shared_ptr NonlinearOptimizer::update(const NonlinearOptimizer::SharedParams& newParams) const { - shared_ptr result(this->clone()); - result->params_ = newParams; - return result; -} - /* ************************************************************************* */ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const SharedState& initial) const { const SharedParams& params = this->params(); - double currentError = initial->error(); + double currentError = initial->error; // check if we're already close enough if(currentError <= params->errorTol) { @@ -52,11 +38,11 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared } // Maybe show output - if (params->verbosity >= NonlinearOptimizerParams::VALUES) this->values()->print("Initial values"); - if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << this->error() << endl; + if (params->verbosity >= NonlinearOptimizerParams::VALUES) initial->values.print("Initial values"); + if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << initial->error << endl; // Return if we already have too many iterations - if(this->iterations() >= params->maxIterations) + if(initial->iterations >= params->maxIterations) return initial; // Iterative loop @@ -67,8 +53,8 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared next = this->iterate(next); // Maybe show output - if (params->verbosity >= NonlinearOptimizerParams::VALUES) next->values->print("newValues"); - if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl; + if(params->verbosity >= NonlinearOptimizerParams::VALUES) next->values.print("newValues"); + if(params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl; } while(next->iterations < params->maxIterations && !checkConvergence(params->relativeErrorTol, params->absoluteErrorTol, params->errorTol, currentError, next->error, params->verbosity)); @@ -83,10 +69,10 @@ NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const Shared } /* ************************************************************************* */ -void 
NonlinearOptimizer::defaultInitialState(NonlinearOptimizerState& initial) const { - state.values = initialValues; - state.error = graph_->error(initialValues); - state.iterations = 0; +void NonlinearOptimizer::defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const { + initialState.values = initialValues; + initialState.error = graph_->error(initialValues); + initialState.iterations = 0; } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index c4f8be2fc..3b1e1dc58 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -36,7 +36,7 @@ public: LINEAR }; - int maxIterations; ///< The maximum iterations to stop iterating (default 100) + size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) double errorTol; ///< The maximum total error to stop iterating (default 0.0) @@ -80,9 +80,6 @@ public: /** Virtual destructor */ virtual ~NonlinearOptimizerState() {} - - /** Clone the state (i.e. make a copy of the derived class) */ - virtual boost::shared_ptr clone() = 0; }; @@ -233,7 +230,7 @@ protected: SharedState defaultOptimize(const SharedState& initial) const; /** Initialize a state, using the current error and 0 iterations */ - void defaultInitialState(SharedState& initial) const; + void defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const; /** Constructor for initial construction of base classes. */ diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp index 2303c203a..120db6d62 100644 --- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp +++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp @@ -22,20 +22,20 @@ namespace gtsam { /* ************************************************************************* */ -const SuccessiveLinearizationOptimizer::SharedOrdering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const { +const Ordering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const { if(!ordering_) { SharedParams params = - boost::dynamic_pointer_cast(params()); + boost::dynamic_pointer_cast(this->params()); // If we're using a COLAMD ordering, compute it - if(!params.ordering) - ordering_ = graph_->orderingCOLAMD(values); + if(params->ordering) + ordering_ = params->ordering; else - ordering_ = params.ordering; + ordering_ = *graph_->orderingCOLAMD(values); } - return ordering_; + return *ordering_; } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h index e8c74d1a8..ad5128b07 100644 --- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h +++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h @@ -24,6 +24,9 @@ namespace gtsam { +class SuccessiveLinearizationParams; +class SuccessiveLinearizationState; + /** * This is an intermediate base class for NonlinearOptimizers that use direct * solving, i.e. factorization. 
This class is here to reduce code duplication @@ -33,23 +36,18 @@ namespace gtsam { class SuccessiveLinearizationOptimizer : public NonlinearOptimizer { public: - class SuccessiveLinearizationParams; - class SuccessiveLinearizationState; - typedef boost::shared_ptr SharedParams; typedef boost::shared_ptr shared_ptr; protected: - typedef boost::shared_ptr SharedOrdering; - SuccessiveLinearizationOptimizer(const SharedGraph& graph) : NonlinearOptimizer(graph) {} - const SharedOrdering& ordering(const Values& values) const; + const Ordering& ordering(const Values& values) const; private: - SharedOrdering ordering_; + mutable boost::optional ordering_; // Mutable because we set/compute it when needed and cache it }; class SuccessiveLinearizationParams : public NonlinearOptimizerParams { @@ -66,12 +64,9 @@ public: QR, }; - /** See SuccessiveLinearizationParams::ordering */ - typedef boost::shared_ptr SharedOrdering; - Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL) Factorization factorization; ///< The numerical factorization (default: LDL) - SharedOrdering ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty) + boost::optional ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty) SuccessiveLinearizationParams() : elimination(MULTIFRONTAL), factorization(LDL) {} From 89e05a687570868498d885464b6371fa9b3442ec Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Thu, 5 Apr 2012 02:45:50 +0000 Subject: [PATCH 39/57] Updating unit tests and SLAM namespaces --- gtsam/slam/planarSLAM.h | 2 +- gtsam/slam/pose2SLAM.h | 2 +- gtsam/slam/tests/testGeneralSFMFactor.cpp | 24 ++--- .../testGeneralSFMFactor_Cal3Bundler.cpp | 20 ++--- gtsam/slam/tests/testPose2SLAM.cpp | 87 +++++++++---------- gtsam/slam/tests/testPose3SLAM.cpp | 6 +- gtsam/slam/tests/testStereoFactor.cpp | 13 +-- gtsam/slam/tests/testVSLAM.cpp | 53 +++++------ tests/testBoundingConstraint.cpp | 4 +- 9 files changed, 107 insertions(+), 104 deletions(-) diff --git a/gtsam/slam/planarSLAM.h b/gtsam/slam/planarSLAM.h index 49463a4db..321371d41 100644 --- a/gtsam/slam/planarSLAM.h +++ b/gtsam/slam/planarSLAM.h @@ -110,7 +110,7 @@ namespace planarSLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return *LevenbergMarquardtOptimizer(*this, initialEstimate).optimized(); + return LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); } }; diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index 319dcb1c0..ccae01ebd 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -97,7 +97,7 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return *LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); + return LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); } }; diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index b4fb6e2af..887f6b86c 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -175,8 +175,8 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // Create an ordering of the variables Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * 1e-5 * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * 1e-5 * 
nMeasurements); } /* ************************************************************************* */ @@ -217,8 +217,8 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { const double reproj_error = 1e-5; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -257,8 +257,8 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -313,8 +313,8 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -356,8 +356,8 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -380,7 +380,7 @@ TEST(GeneralSFMFactor, GeneralCameraPoseRange) { LevenbergMarquardtParams params; params.absoluteErrorTol = 1e-9; params.relativeErrorTol = 1e-9; - Values actual = *LevenbergMarquardtOptimizer(graph, init, params).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, params).optimized(init); EXPECT(assert_equal(expected, actual, 1e-4)); } @@ -404,7 +404,7 @@ TEST(GeneralSFMFactor, CalibratedCameraPoseRange) { LevenbergMarquardtParams params; params.absoluteErrorTol = 1e-9; params.relativeErrorTol = 1e-9; - Values actual = *LevenbergMarquardtOptimizer(graph, init, params).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, params).optimized(init); EXPECT(assert_equal(expected, actual, 1e-4)); } diff --git a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index 044908e56..ed7c35663 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -176,8 +176,8 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // Create an ordering of the variables Ordering ordering = *getOrdering(X,L); - 
NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * 1e-5 * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -218,8 +218,8 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { const double reproj_error = 1e-5; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -258,8 +258,8 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -310,8 +310,8 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -353,8 +353,8 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(graph, values, ordering).optimize(); - EXPECT(optimizer->error() < 0.5 * reproj_error * nMeasurements); + NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); + EXPECT(final->error < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testPose2SLAM.cpp b/gtsam/slam/tests/testPose2SLAM.cpp index 1abc8e476..5555b03b3 100644 --- a/gtsam/slam/tests/testPose2SLAM.cpp +++ b/gtsam/slam/tests/testPose2SLAM.cpp @@ -148,28 +148,29 @@ TEST( Pose2SLAM, linearization ) TEST(Pose2Graph, optimize) { // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new pose2SLAM::Graph); - fg->addPoseConstraint(0, Pose2(0,0,0)); - fg->addOdometry(0, 1, Pose2(1,2,M_PI_2), covariance); + pose2SLAM::Graph fg; + fg.addPoseConstraint(0, Pose2(0,0,0)); + fg.addOdometry(0, 1, Pose2(1,2,M_PI_2), covariance); // Create initial config - boost::shared_ptr initial(new Values()); - initial->insert(pose2SLAM::PoseKey(0), Pose2(0,0,0)); - initial->insert(pose2SLAM::PoseKey(1), Pose2(0,0,0)); + Values initial; + initial.insert(pose2SLAM::PoseKey(0), 
Pose2(0,0,0)); + initial.insert(pose2SLAM::PoseKey(1), Pose2(0,0,0)); // Choose an ordering and optimize - shared_ptr ordering(new Ordering); - *ordering += kx0, kx1; + Ordering ordering; + ordering += kx0, kx1; LevenbergMarquardtParams params; params.relativeErrorTol = 1e-15; - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, initial, params, ordering).optimize(); + params.ordering = ordering; + Values actual = LevenbergMarquardtOptimizer(fg, params).optimized(initial); // Check with expected config Values expected; expected.insert(pose2SLAM::PoseKey(0), Pose2(0,0,0)); expected.insert(pose2SLAM::PoseKey(1), Pose2(1,2,M_PI_2)); - CHECK(assert_equal(expected, *optimizer->values())); + CHECK(assert_equal(expected, actual)); } /* ************************************************************************* */ @@ -181,29 +182,28 @@ TEST(Pose2Graph, optimizeThreePoses) { Pose2 p0 = hexagon.pose(0), p1 = hexagon.pose(1); // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new pose2SLAM::Graph); - fg->addPoseConstraint(0, p0); + pose2SLAM::Graph fg; + fg.addPoseConstraint(0, p0); Pose2 delta = p0.between(p1); - fg->addOdometry(0, 1, delta, covariance); - fg->addOdometry(1, 2, delta, covariance); - fg->addOdometry(2, 0, delta, covariance); + fg.addOdometry(0, 1, delta, covariance); + fg.addOdometry(1, 2, delta, covariance); + fg.addOdometry(2, 0, delta, covariance); // Create initial config - boost::shared_ptr initial(new pose2SLAM::Values()); - initial->insertPose(0, p0); - initial->insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); - initial->insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); + pose2SLAM::Values initial; + initial.insertPose(0, p0); + initial.insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); + initial.insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); // Choose an ordering - shared_ptr ordering(new Ordering); - *ordering += kx0,kx1,kx2; + Ordering ordering; + ordering += kx0,kx1,kx2; // optimize LevenbergMarquardtParams params; params.relativeErrorTol = 1e-15; - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, initial, params, ordering).optimize(); - - Values actual = *optimizer->values(); + params.ordering = ordering; + Values actual = LevenbergMarquardtOptimizer(fg, params).optimized(initial); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); @@ -218,35 +218,34 @@ TEST_UNSAFE(Pose2SLAM, optimizeCircle) { Pose2 p0 = hexagon.pose(0), p1 = hexagon.pose(1); // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new pose2SLAM::Graph); - fg->addPoseConstraint(0, p0); + pose2SLAM::Graph fg; + fg.addPoseConstraint(0, p0); Pose2 delta = p0.between(p1); - fg->addOdometry(0, 1, delta, covariance); - fg->addOdometry(1,2, delta, covariance); - fg->addOdometry(2,3, delta, covariance); - fg->addOdometry(3,4, delta, covariance); - fg->addOdometry(4,5, delta, covariance); - fg->addOdometry(5, 0, delta, covariance); + fg.addOdometry(0, 1, delta, covariance); + fg.addOdometry(1,2, delta, covariance); + fg.addOdometry(2,3, delta, covariance); + fg.addOdometry(3,4, delta, covariance); + fg.addOdometry(4,5, delta, covariance); + fg.addOdometry(5, 0, delta, covariance); // Create initial config - boost::shared_ptr initial(new pose2SLAM::Values()); - initial->insertPose(0, p0); - initial->insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); - initial->insertPose(2, 
hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); - initial->insertPose(3, hexagon.pose(3).retract(Vector_(3,-0.1, 0.1,-0.1))); - initial->insertPose(4, hexagon.pose(4).retract(Vector_(3, 0.1,-0.1, 0.1))); - initial->insertPose(5, hexagon.pose(5).retract(Vector_(3,-0.1, 0.1,-0.1))); + pose2SLAM::Values initial; + initial.insertPose(0, p0); + initial.insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); + initial.insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); + initial.insertPose(3, hexagon.pose(3).retract(Vector_(3,-0.1, 0.1,-0.1))); + initial.insertPose(4, hexagon.pose(4).retract(Vector_(3, 0.1,-0.1, 0.1))); + initial.insertPose(5, hexagon.pose(5).retract(Vector_(3,-0.1, 0.1,-0.1))); // Choose an ordering - shared_ptr ordering(new Ordering); - *ordering += kx0,kx1,kx2,kx3,kx4,kx5; + Ordering ordering; + ordering += kx0,kx1,kx2,kx3,kx4,kx5; // optimize LevenbergMarquardtParams params; params.relativeErrorTol = 1e-15; - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, initial, params, ordering).optimize(); - - Values actual = *optimizer->values(); + params.ordering = ordering; + Values actual = LevenbergMarquardtOptimizer(fg, params).optimized(initial); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); diff --git a/gtsam/slam/tests/testPose3SLAM.cpp b/gtsam/slam/tests/testPose3SLAM.cpp index 2c9562275..dac562267 100644 --- a/gtsam/slam/tests/testPose3SLAM.cpp +++ b/gtsam/slam/tests/testPose3SLAM.cpp @@ -78,7 +78,7 @@ TEST(Pose3Graph, optimizeCircle) { Ordering ordering; ordering += kx0,kx1,kx2,kx3,kx4,kx5; - Values actual = *LevenbergMarquardtOptimizer(fg, initial, ordering).optimized(); + Values actual = LevenbergMarquardtOptimizer(fg, ordering).optimized(initial); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); @@ -113,7 +113,7 @@ TEST(Pose3Graph, partial_prior_height) { // linearization EXPECT_DOUBLES_EQUAL(2.0, height.error(values), tol); - Values actual = *LevenbergMarquardtOptimizer(graph, values).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph).optimized(values); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } @@ -165,7 +165,7 @@ TEST(Pose3Graph, partial_prior_xy) { Values values; values.insert(key, init); - Values actual = *LevenbergMarquardtOptimizer(graph, values).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph).optimized(values); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } diff --git a/gtsam/slam/tests/testStereoFactor.cpp b/gtsam/slam/tests/testStereoFactor.cpp index c1393450e..bfc93fbaa 100644 --- a/gtsam/slam/tests/testStereoFactor.cpp +++ b/gtsam/slam/tests/testStereoFactor.cpp @@ -65,19 +65,20 @@ TEST( StereoFactor, singlePoint) Point3 l1(0, 0, 0); values.insert(PointKey(1), l1); // add point at origin; - NonlinearOptimizer::auto_ptr optimizer(new GaussNewtonOptimizer(graph, values)); + GaussNewtonOptimizer optimizer(graph); + NonlinearOptimizer::SharedState initial = optimizer.initialState(values); // We expect the initial to be zero because config is the ground truth - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + DOUBLES_EQUAL(0.0, initial->error, 1e-9); // Iterate once, and the config should not have changed - NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); - DOUBLES_EQUAL(0.0, afterOneIteration->error(), 1e-9); + NonlinearOptimizer::SharedState afterOneIteration = 
optimizer.iterate(initial); + DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); // Complete solution - NonlinearOptimizer::auto_ptr final = optimizer->optimize(); + NonlinearOptimizer::SharedState final = optimizer.optimize(initial); - DOUBLES_EQUAL(0.0, final->error(), 1e-6); + DOUBLES_EQUAL(0.0, final->error, 1e-6); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testVSLAM.cpp b/gtsam/slam/tests/testVSLAM.cpp index 5f65b757d..0c42772de 100644 --- a/gtsam/slam/tests/testVSLAM.cpp +++ b/gtsam/slam/tests/testVSLAM.cpp @@ -102,16 +102,17 @@ TEST( Graph, optimizeLM) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new LevenbergMarquardtOptimizer(graph, initialEstimate, ordering)); - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + const NonlinearOptimizer& optimizer = LevenbergMarquardtOptimizer(graph, ordering); + NonlinearOptimizer::SharedState initial = optimizer.initialState(initialEstimate); + DOUBLES_EQUAL(0.0, initial->error, 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); + DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); // check if correct - CHECK(assert_equal(initialEstimate,*(afterOneIteration->values()))); + CHECK(assert_equal(initialEstimate, afterOneIteration->values)); } @@ -139,16 +140,17 @@ TEST( Graph, optimizeLM2) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new LevenbergMarquardtOptimizer(graph, initialEstimate, ordering)); - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + LevenbergMarquardtOptimizer optimizer(graph, ordering); + NonlinearOptimizer::SharedState initial = optimizer.initialState(initialEstimate); + DOUBLES_EQUAL(0.0, initial->error, 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); + DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); // check if correct - CHECK(assert_equal(initialEstimate,*(afterOneIteration->values()))); + CHECK(assert_equal(initialEstimate, afterOneIteration->values)); } @@ -156,32 +158,33 @@ TEST( Graph, optimizeLM2) TEST( Graph, CHECK_ORDERING) { // build a graph - shared_ptr graph(new visualSLAM::Graph(testGraph())); + visualSLAM::Graph graph = testGraph(); // add 2 camera constraints - graph->addPoseConstraint(1, camera1); - graph->addPoseConstraint(2, camera2); + graph.addPoseConstraint(1, camera1); + graph.addPoseConstraint(2, camera2); // Create an initial values structure corresponding to the ground truth - boost::shared_ptr initialEstimate(new Values); - initialEstimate->insert(PoseKey(1), camera1); - initialEstimate->insert(PoseKey(2), camera2); - initialEstimate->insert(PointKey(1), landmark1); - initialEstimate->insert(PointKey(2), landmark2); - initialEstimate->insert(PointKey(3), landmark3); - initialEstimate->insert(PointKey(4), landmark4); + Values initialEstimate; + initialEstimate.insert(PoseKey(1), camera1); + 
initialEstimate.insert(PoseKey(2), camera2); + initialEstimate.insert(PointKey(1), landmark1); + initialEstimate.insert(PointKey(2), landmark2); + initialEstimate.insert(PointKey(3), landmark3); + initialEstimate.insert(PointKey(4), landmark4); // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - NonlinearOptimizer::auto_ptr optimizer(new LevenbergMarquardtOptimizer(graph, initialEstimate)); - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + LevenbergMarquardtOptimizer optimizer(graph); + NonlinearOptimizer::SharedState initial = optimizer.initialState(initialEstimate); + DOUBLES_EQUAL(0.0, initial->error, 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - NonlinearOptimizer::auto_ptr afterOneIteration = optimizer->iterate(); - DOUBLES_EQUAL(0.0, optimizer->error(), 1e-9); + NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); + DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration->values()))); + CHECK(assert_equal(initialEstimate, afterOneIteration->values)); } /* ************************************************************************* */ diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index c062484a6..b9230705a 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -154,7 +154,7 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { Values initValues; initValues.insert(x1, start_pt); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph).optimized(initValues); Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, actual, tol)); @@ -176,7 +176,7 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { Values initValues; initValues.insert(x1, start_pt); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph).optimized(initValues); Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, actual, tol)); From 8aa04312df8f637a578263f641c794bd209633c9 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 16:51:30 +0000 Subject: [PATCH 40/57] Project file --- .cproject | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/.cproject b/.cproject index e80acb39e..5916e18da 100644 --- a/.cproject +++ b/.cproject @@ -149,6 +149,57 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 6c4bd1160a2071f4ef4a27aaacdaac59c8d62ab9 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 16:51:33 +0000 Subject: [PATCH 41/57] Changes in progress --- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 34 ++-- gtsam/nonlinear/GaussNewtonOptimizer.h | 74 ++++---- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 168 +++++++++-------- gtsam/nonlinear/NonlinearOptimizer.cpp | 51 ++--- gtsam/nonlinear/NonlinearOptimizer.h | 174 ++++++++---------- .../SuccessiveLinearizationOptimizer.cpp | 41 ----- .../SuccessiveLinearizationOptimizer.h | 31 ---- 7 files changed, 228 insertions(+), 345 deletions(-) delete mode 100644 gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index 8efc6aead..02960f569 100644 --- 
a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -25,49 +25,39 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::SharedState GaussNewtonOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const { +void GaussNewtonOptimizer::iterate() const { - const GaussNewtonState& current = dynamic_cast(*_current); + const NonlinearOptimizerState& current = state_; // Linearize graph - const Ordering& ordering = this->ordering(current.values); - GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering); + GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering); // Check whether to use QR bool useQR; - if(params_->factorization == GaussNewtonParams::LDL) + if(params_.factorization == GaussNewtonParams::LDL) useQR = false; - else if(params_->factorization == GaussNewtonParams::QR) + else if(params_.factorization == GaussNewtonParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::factorization"); // Optimize VectorValues::shared_ptr delta; - if(params_->elimination == GaussNewtonParams::MULTIFRONTAL) + if(params_.elimination == GaussNewtonParams::MULTIFRONTAL) delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); - else if(params_->elimination == GaussNewtonParams::SEQUENTIAL) + else if(params_.elimination == GaussNewtonParams::SEQUENTIAL) delta = GaussianSequentialSolver(*linear, useQR).optimize(); else throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination"); // Maybe show output - if(params_->verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); + if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); // Create new state with new values and new error - SharedState newState = boost::make_shared(); - newState->values = current.values.retract(*delta, ordering); - newState->error = graph_->error(newState->values); - newState->iterations = current.iterations + 1; - - return newState; -} - -/* ************************************************************************* */ -NonlinearOptimizer::SharedState GaussNewtonOptimizer::initialState(const Values& initialValues) const { - SharedState initial = boost::make_shared(); - defaultInitialState(initialValues, *initial); - return initial; + NonlinearOptimizerState newState; + state_.values = current.values.retract(*delta, *params_.ordering); + state_.error = graph_.error(state_.values); + ++ state_.iterations; } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index 5e6266feb..1528eb1df 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -22,25 +22,13 @@ namespace gtsam { -/** Parameters for Gauss-Newton optimization, inherits from - * NonlinearOptimizationParams. 
- */ -class GaussNewtonParams : public SuccessiveLinearizationParams { -}; - -class GaussNewtonState : public SuccessiveLinearizationState { -}; - /** * This class performs Gauss-Newton nonlinear optimization - * TODO: use make_shared */ -class GaussNewtonOptimizer : public SuccessiveLinearizationOptimizer { +class GaussNewtonOptimizer : public NonlinearOptimizer { public: - typedef boost::shared_ptr SharedParams; - /// @name Standard interface /// @{ @@ -52,10 +40,9 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - GaussNewtonOptimizer(const NonlinearFactorGraph& graph, + GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const GaussNewtonParams& params = GaussNewtonParams()) : - SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new GaussNewtonParams(params)) {} + NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -65,24 +52,9 @@ public: * @param values The initial variable assignments * @param params The optimization parameters */ - GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : - SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new GaussNewtonParams()) { - params_->ordering = ordering; } - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. - * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - GaussNewtonOptimizer(const SharedGraph& graph, - const GaussNewtonParams& params = GaussNewtonParams()) : - SuccessiveLinearizationOptimizer(graph), - params_(new GaussNewtonParams(params)) {} - - /** Access the parameters */ - virtual NonlinearOptimizer::SharedParams params() const { return params_; } + GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : + NonlinearOptimizer(graph), state_(graph, initialValues) { + params_.ordering = ordering; } /// @} @@ -96,18 +68,40 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const; + virtual void iterate() const; - /** Create an initial state with the specified variable assignment values and - * all other default state. 
- */ - virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const; + /** Access the parameters */ + const GaussNewtonParams& params() const { return params_; } + + /** Access the last state */ + const NonlinearOptimizerState& state() const { return state_; } /// @} protected: - const SharedParams params_; + GaussNewtonParams params_; + NonlinearOptimizerState state_; + + /** Access the parameters (base class version) */ + virtual const NonlinearOptimizerParams& _params() const { return params_; } + + /** Access the state (base class version) */ + virtual const NonlinearOptimizerState& _state() const { return state_; } + + /** Internal function for computing a COLAMD ordering if no ordering is specified */ + GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const { + if(!params.ordering) + params.ordering = graph.orderingCOLAMD(values); + return params; + } + +}; + +/** Parameters for Gauss-Newton optimization, inherits from + * NonlinearOptimizationParams. + */ +class GaussNewtonParams : public SuccessiveLinearizationParams { }; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 7787825a9..5ce5f5f1f 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -22,6 +22,88 @@ namespace gtsam { +/** + * This class performs Levenberg-Marquardt nonlinear optimization + * TODO: use make_shared + */ +class LevenbergMarquardtOptimizer : public NonlinearOptimizer { + +public: + + typedef boost::shared_ptr shared_ptr; + + /// @name Standard interface + /// @{ + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, + const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : + NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {} + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param values The initial variable assignments + * @param params The optimization parameters + */ + LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : + NonlinearOptimizer(graph), state_(graph, initialValues) { + params_.ordering = ordering; } + + /// @} + + /// @name Advanced interface + /// @{ + + /** Virtual destructor */ + virtual ~LevenbergMarquardtOptimizer() {} + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). 
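+ *
+ * In this in-progress revision, iterate() updates the optimizer's internal
+ * state rather than returning a new optimizer object. A rough sketch of the
+ * intended call pattern, assuming the constructors above:
+ *
+ *   LevenbergMarquardtOptimizer optimizer(graph, initialValues, params);
+ *   optimizer.iterate();                         // one LM step
+ *   const Values& updated = optimizer.state().values;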
+ */ + virtual void iterate() const; + + /** Access the parameters */ + const GaussNewtonParams& params() const { return params_; } + + /** Access the last state */ + const NonlinearOptimizerState& state() const { return state_; } + + /// @} + +protected: + + typedef boost::shared_ptr > SharedDimensions; + + mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it + + LevenbergMarquardtParams params_; + LevenbergMarquardtState state_; + + /** Access the parameters (base class version) */ + virtual const NonlinearOptimizerParams& _params() const { return params_; } + + /** Access the state (base class version) */ + virtual const NonlinearOptimizerState& _state() const { return state_; } + + /** Internal function for computing a COLAMD ordering if no ordering is specified */ + GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const { + if(!params.ordering) + params.ordering = graph.orderingCOLAMD(values); + return params; + } +}; + /** Parameters for Levenberg-Marquardt optimization. Note that this parameters * class inherits from NonlinearOptimizerParams, which specifies the parameters * common to all nonlinear optimization algorithms. This class also contains @@ -61,95 +143,11 @@ public: /** * State for LevenbergMarquardtOptimizer */ -class LevenbergMarquardtState : public SuccessiveLinearizationState { +class LevenbergMarquardtState : public NonlinearOptimizerState { public: double lambda; }; -/** - * This class performs Levenberg-Marquardt nonlinear optimization - * TODO: use make_shared - */ -class LevenbergMarquardtOptimizer : public SuccessiveLinearizationOptimizer { - -public: - - typedef boost::shared_ptr SharedParams; - typedef boost::shared_ptr SharedState; - typedef boost::shared_ptr shared_ptr; - - /// @name Standard interface - /// @{ - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. For convenience this - * version takes plain objects instead of shared pointers, but internally - * copies the objects. - * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, - const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : - SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new LevenbergMarquardtParams(params)) {} - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. For convenience this - * version takes plain objects instead of shared pointers, but internally - * copies the objects. - * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : - SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new LevenbergMarquardtParams()) { - params_->ordering = ordering; } - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. 
- * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - LevenbergMarquardtOptimizer(const SharedGraph& graph, - const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : - SuccessiveLinearizationOptimizer(graph), - params_(new LevenbergMarquardtParams(params)) {} - - /** Access the parameters */ - virtual NonlinearOptimizer::SharedParams params() const { return params_; } - - /// @} - - /// @name Advanced interface - /// @{ - - /** Virtual destructor */ - virtual ~LevenbergMarquardtOptimizer() {} - - /** Perform a single iteration, returning a new NonlinearOptimizer class - * containing the updated variable assignments, which may be retrieved with - * values(). - */ - virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const; - - /** Create an initial state with the specified variable assignment values and - * all other default state. - */ - virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const; - - /// @} - -protected: - - typedef boost::shared_ptr > SharedDimensions; - - SharedParams params_; - mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it -}; - } diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index ef40aeb78..c80714d34 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -25,54 +25,43 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::SharedState NonlinearOptimizer::defaultOptimize(const SharedState& initial) const { +void NonlinearOptimizer::defaultOptimize() { - const SharedParams& params = this->params(); - double currentError = initial->error; + const NonlinearOptimizerParams& params = this->params(); + double currentError = this->error(); // check if we're already close enough - if(currentError <= params->errorTol) { - if (params->verbosity >= NonlinearOptimizerParams::ERROR) - cout << "Exiting, as error = " << currentError << " < " << params->errorTol << endl; - return initial; + if(currentError <= params.errorTol) { + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + cout << "Exiting, as error = " << currentError << " < " << params.errorTol << endl; + return; } // Maybe show output - if (params->verbosity >= NonlinearOptimizerParams::VALUES) initial->values.print("Initial values"); - if (params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << initial->error << endl; + if (params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("Initial values"); + if (params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << currentError << endl; // Return if we already have too many iterations - if(initial->iterations >= params->maxIterations) - return initial; + if(this->iterations() >= params.maxIterations) + return; // Iterative loop - SharedState next = initial; do { // Do next iteration - currentError = next->error; - next = this->iterate(next); + currentError = this->error(); + this->iterate(); // Maybe show output - if(params->verbosity >= NonlinearOptimizerParams::VALUES) next->values.print("newValues"); - if(params->verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << next->error << endl; - } while(next->iterations < params->maxIterations && - 
!checkConvergence(params->relativeErrorTol, params->absoluteErrorTol, - params->errorTol, currentError, next->error, params->verbosity)); + if(params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("newValues"); + if(params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << this->error() << endl; + } while(this->iterations() < params.maxIterations && + !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, + params.errorTol, currentError, this->error(), params.verbosity)); // Printing if verbose - if (params->verbosity >= NonlinearOptimizerParams::ERROR && - next->iterations >= params->maxIterations) + if (params.verbosity >= NonlinearOptimizerParams::ERROR && + this->iterations() >= params.maxIterations) cout << "Terminating because reached maximum iterations" << endl; - - // Return optimizer from final iteration - return next; -} - -/* ************************************************************************* */ -void NonlinearOptimizer::defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const { - initialState.values = initialValues; - initialState.error = graph_->error(initialValues); - initialState.iterations = 0; } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 3b1e1dc58..f3f4abb92 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -22,67 +22,6 @@ namespace gtsam { -/** The common parameters for Nonlinear optimizers. Most optimizers - * deriving from NonlinearOptimizer also subclass the parameters. - */ -class NonlinearOptimizerParams { -public: - /** See NonlinearOptimizerParams::verbosity */ - enum Verbosity { - SILENT, - ERROR, - VALUES, - DELTA, - LINEAR - }; - - size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) - double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) - double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) - double errorTol; ///< The maximum total error to stop iterating (default 0.0) - Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) - - NonlinearOptimizerParams() : - maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), - errorTol(0.0), verbosity(SILENT) {} - - virtual void print(const std::string& str = "") const { - std::cout << str << "\n"; - std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; - std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; - std::cout << " total error threshold: " << errorTol << "\n"; - std::cout << " maximum iterations: " << maxIterations << "\n"; - std::cout << " verbosity level: " << verbosity << std::endl; - } - - virtual ~NonlinearOptimizerParams() {} -}; - - -/** - * Base class for a nonlinear optimization state, including the current estimate - * of the variable values, error, and number of iterations. Optimizers derived - * from NonlinearOptimizer usually also define a derived state class containing - * additional state specific to the algorithm (for example, Dogleg state - * contains the current trust region radius). - */ -class NonlinearOptimizerState { -public: - - /** The current estimate of the variable values. */ - Values values; - - /** The factor graph error on the current values. 
*/ - double error; - - /** The number of optimization iterations performed. */ - unsigned int iterations; - - /** Virtual destructor */ - virtual ~NonlinearOptimizerState() {} -}; - - /** * This is the abstract interface for classes that can optimize for the * maximum-likelihood estimate of a NonlinearFactorGraph. @@ -162,15 +101,6 @@ public: /** A shared pointer to this class */ typedef boost::shared_ptr shared_ptr; - /** A const shared_ptr to a NonlinearFactorGraph */ - typedef boost::shared_ptr SharedGraph; - - /** A const shared_ptr to the parameters */ - typedef boost::shared_ptr SharedParams; - - /** A shared_ptr to an optimizer state */ - typedef boost::shared_ptr SharedState; - /// @name Standard interface /// @{ @@ -183,21 +113,13 @@ public: * process, you may call iterate() and check_convergence() yourself, and if * needed modify the optimization state between iterations. */ - virtual SharedState optimize(const SharedState& initial) const { return defaultOptimize(initial); } + virtual const Values& optimize() const { return defaultOptimize(); } - SharedState optimize(const Values& initialization) const { return optimize(initialState(initialization)); } + double error() const { return _state().error; } - /** Shortcut to optimize and return the resulting Values of the maximum- - * likelihood estimate. To access statistics and information such as the - * final error and number of iterations, use optimize() instead. - * @return The maximum-likelihood estimate. - */ - virtual Values optimized(const SharedState& initial) const { return this->optimize(initial)->values; } + unsigned int iterations() const { return _state().iterations; } - Values optimized(const Values& initialization) const { return optimized(initialState(initialization)); } - - /** Retrieve the parameters. */ - virtual SharedParams params() const = 0; + const Values& values() const { return _state().values; } /// @} @@ -211,31 +133,93 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual SharedState iterate(const SharedState& current) const = 0; - - /** Create an initial state from a variable assignment Values, with all - * other state values at their default initial values. - */ - virtual SharedState initialState(const Values& initialValues) const = 0; + virtual void iterate() const = 0; /// @} protected: - const SharedGraph graph_; + NonlinearFactorGraph graph_; /** A default implementation of the optimization loop, which calls iterate() * until checkConvergence returns true. */ - SharedState defaultOptimize(const SharedState& initial) const; + void defaultOptimize(); - /** Initialize a state, using the current error and 0 iterations */ - void defaultInitialState(const Values& initialValues, NonlinearOptimizerState& initialState) const; + virtual const NonlinearOptimizerState& _state() const = 0; - /** Constructor for initial construction of base classes. - */ - NonlinearOptimizer(const SharedGraph& graph) : graph_(graph) {} + virtual const NonlinearOptimizerParams& _params() const = 0; + /** Constructor for initial construction of base classes. */ + NonlinearOptimizer(const NonlinearFactorGraph& graph) : graph_(graph) {} + +}; + + +/** The common parameters for Nonlinear optimizers. Most optimizers + * deriving from NonlinearOptimizer also subclass the parameters. 
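As the comment on optimize() above suggests, the outer loop can also be driven by hand with iterate(), error(), and iterations(), using the same checkConvergence() free function that defaultOptimize() calls. A sketch of that pattern, assuming `graph` and `initial` are an existing NonlinearFactorGraph and Values and the tolerances are illustrative:

    // Hand-rolled optimization loop against the new interface.
    LevenbergMarquardtOptimizer optimizer(graph, initial);
    bool converged = false;
    while (!converged && optimizer.iterations() < 100) {
      double prevError = optimizer.error();
      optimizer.iterate();
      // relative, absolute, and total error tolerances, then old/new error
      converged = checkConvergence(1e-5, 1e-5, 0.0,
                                   prevError, optimizer.error(),
                                   NonlinearOptimizerParams::SILENT);
    }
    Values result = optimizer.values();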
+ */ +class NonlinearOptimizerParams { +public: + /** See NonlinearOptimizerParams::verbosity */ + enum Verbosity { + SILENT, + ERROR, + VALUES, + DELTA, + LINEAR + }; + + size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) + double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) + double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) + double errorTol; ///< The maximum total error to stop iterating (default 0.0) + Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) + + NonlinearOptimizerParams() : + maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), + errorTol(0.0), verbosity(SILENT) {} + + virtual void print(const std::string& str = "") const { + std::cout << str << "\n"; + std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; + std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; + std::cout << " total error threshold: " << errorTol << "\n"; + std::cout << " maximum iterations: " << maxIterations << "\n"; + std::cout << " verbosity level: " << verbosity << std::endl; + } + + virtual ~NonlinearOptimizerParams() {} +}; + + +/** + * Base class for a nonlinear optimization state, including the current estimate + * of the variable values, error, and number of iterations. Optimizers derived + * from NonlinearOptimizer usually also define a derived state class containing + * additional state specific to the algorithm (for example, Dogleg state + * contains the current trust region radius). + */ +class NonlinearOptimizerState { +public: + + /** The current estimate of the variable values. */ + Values values; + + /** The factor graph error on the current values. */ + double error; + + /** The number of optimization iterations performed. */ + unsigned int iterations; + + NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : + values(values), error(graph.error(values)), iterations(iterations) {} + + NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) : + values(values), error(error), iterations(iterations) {} + + /** Virtual destructor */ + virtual ~NonlinearOptimizerState() {} }; /** Check whether the relative error decrease is less than relativeErrorTreshold, diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp deleted file mode 100644 index 120db6d62..000000000 --- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file DirectOptimizer.cpp - * @brief - * @author Richard Roberts - * @date Apr 1, 2012 - */ - -#include -#include - -namespace gtsam { - -/* ************************************************************************* */ -const Ordering& SuccessiveLinearizationOptimizer::ordering(const Values& values) const { - - if(!ordering_) { - SharedParams params = - boost::dynamic_pointer_cast(this->params()); - - // If we're using a COLAMD ordering, compute it - if(params->ordering) - ordering_ = params->ordering; - else - ordering_ = *graph_->orderingCOLAMD(values); - } - - return *ordering_; -} - -} /* namespace gtsam */ diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h index ad5128b07..e95502b28 100644 --- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h +++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h @@ -18,38 +18,10 @@ #pragma once -#include - #include namespace gtsam { -class SuccessiveLinearizationParams; -class SuccessiveLinearizationState; - -/** - * This is an intermediate base class for NonlinearOptimizers that use direct - * solving, i.e. factorization. This class is here to reduce code duplication - * for storing the ordering, having factorization and elimination parameters, - * etc. - */ -class SuccessiveLinearizationOptimizer : public NonlinearOptimizer { -public: - - typedef boost::shared_ptr SharedParams; - typedef boost::shared_ptr shared_ptr; - -protected: - - SuccessiveLinearizationOptimizer(const SharedGraph& graph) : NonlinearOptimizer(graph) {} - - const Ordering& ordering(const Values& values) const; - -private: - - mutable boost::optional ordering_; // Mutable because we set/compute it when needed and cache it -}; - class SuccessiveLinearizationParams : public NonlinearOptimizerParams { public: /** See SuccessiveLinearizationParams::elimination */ @@ -98,7 +70,4 @@ public: } }; -class SuccessiveLinearizationState : public NonlinearOptimizerState { -}; - } /* namespace gtsam */ From fdc4cc586df8cf67d24fc782e93976d517ee4529 Mon Sep 17 00:00:00 2001 From: Stephen Williams Date: Mon, 14 May 2012 18:11:52 +0000 Subject: [PATCH 42/57] Incremental modifications to the new Nonlinear Optimizer interface. 
--- gtsam/nonlinear/DoglegOptimizer.cpp | 41 ++--- gtsam/nonlinear/DoglegOptimizer.h | 158 +++++++++--------- gtsam/nonlinear/GaussNewtonOptimizer.h | 6 +- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 73 +++----- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 22 +-- 5 files changed, 126 insertions(+), 174 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index f910c9661..9d52fb7c1 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -27,19 +27,17 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const { - - const DoglegState& current = dynamic_cast(*_current); +void DoglegOptimizer::iterate(void) const { // Linearize graph - const Ordering& ordering = this->ordering(current.values); - GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering); + const Ordering& ordering = *params_.ordering; + GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, ordering); // Check whether to use QR bool useQR; - if(params_->factorization == DoglegParams::LDL) + if(params_.factorization == DoglegParams::LDL) useQR = false; - else if(params_->factorization == DoglegParams::QR) + else if(params_.factorization == DoglegParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization"); @@ -50,37 +48,26 @@ NonlinearOptimizer::SharedState DoglegOptimizer::iterate(const NonlinearOptimize // Do Dogleg iteration with either Multifrontal or Sequential elimination DoglegOptimizerImpl::IterationResult result; - if(params_->elimination == DoglegParams::MULTIFRONTAL) { + if(params_.elimination == DoglegParams::MULTIFRONTAL) { GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, *graph_, current.values, ordering, current.error, dlVerbose); + result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose); - } else if(params_->elimination == DoglegParams::SEQUENTIAL) { + } else if(params_.elimination == DoglegParams::SEQUENTIAL) { GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(current.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, *graph_, current.values, ordering, current.error, dlVerbose); + result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose); } else { throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination"); } // Maybe show output - if(params_->verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta"); + if(params_.verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta"); // Create new state with new values and new error - SharedState newState = boost::make_shared(); - newState->values = current.values.retract(result.dx_d, ordering); - newState->error = result.f_error; - newState->iterations = current.iterations + 1; - newState->Delta = result.Delta; - - return newState; -} - -/* ************************************************************************* */ 
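For readers unfamiliar with what DoglegOptimizerImpl::Iterate computes, the step selection is the classical Powell dogleg rule. The standalone sketch below is purely conceptual, not the GTSAM implementation (which operates on the Bayes tree/net produced above): dx_u is the steepest-descent step, dx_n the Gauss-Newton step, and Delta the trust-region radius.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    static double norm(const std::vector<double>& v) {
      double s = 0.0;
      for (size_t i = 0; i < v.size(); ++i) s += v[i] * v[i];
      return std::sqrt(s);
    }

    // Powell dogleg step: take the Gauss-Newton step if it fits inside the
    // trust region, otherwise blend between steepest descent and Gauss-Newton
    // so the step lands exactly on the trust-region boundary.
    std::vector<double> doglegStep(const std::vector<double>& dx_u,
                                   const std::vector<double>& dx_n,
                                   double Delta) {
      if (norm(dx_n) <= Delta) return dx_n;            // full GN step fits
      if (norm(dx_u) >= Delta) {                       // truncate steepest descent
        std::vector<double> r(dx_u);
        const double s = Delta / norm(dx_u);
        for (size_t i = 0; i < r.size(); ++i) r[i] *= s;
        return r;
      }
      // Solve ||dx_u + tau*(dx_n - dx_u)|| = Delta for tau in (0,1).
      std::vector<double> d(dx_u.size());
      for (size_t i = 0; i < d.size(); ++i) d[i] = dx_n[i] - dx_u[i];
      double a = 0.0, b = 0.0, c = -Delta * Delta;
      for (size_t i = 0; i < d.size(); ++i) {
        a += d[i] * d[i];
        b += 2.0 * dx_u[i] * d[i];
        c += dx_u[i] * dx_u[i];
      }
      const double tau = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
      std::vector<double> r(dx_u);
      for (size_t i = 0; i < r.size(); ++i) r[i] += tau * d[i];
      return r;
    }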
-NonlinearOptimizer::SharedState DoglegOptimizer::initialState(const Values& initialValues) const { - SharedState initial = boost::make_shared(); - defaultInitialState(initialValues, *initial); - initial->Delta = params_->deltaInitial; - return initial; + state_.values = state_.values.retract(result.dx_d, ordering); + state_.error = result.f_error; + state_.delta = result.delta; + ++state_.iterations; } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index badd021e1..af9c91179 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -22,6 +22,82 @@ namespace gtsam { +/** + * This class performs Dogleg nonlinear optimization + */ +class DoglegOptimizer : public SuccessiveLinearizationOptimizer { + +public: + + typedef boost::shared_ptr shared_ptr; + + /// @name Standard interface + /// @{ + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param initialValues The initial variable assignments + * @param params The optimization parameters + */ + DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, + const DoglegParams& params = DoglegParams()) : + NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {} + + /** Standard constructor, requires a nonlinear factor graph, initial + * variable assignments, and optimization parameters. For convenience this + * version takes plain objects instead of shared pointers, but internally + * copies the objects. + * @param graph The nonlinear factor graph to optimize + * @param initialValues The initial variable assignments + * @param params The optimization parameters + */ + DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : + NonlinearOptimizer(graph), state_(graph, initialValues) { + *params_.ordering = ordering; } + + /// @} + + /// @name Advanced interface + /// @{ + + /** Virtual destructor */ + virtual ~DoglegOptimizer() {} + + /** Perform a single iteration, returning a new NonlinearOptimizer class + * containing the updated variable assignments, which may be retrieved with + * values(). + */ + virtual void iterate() const; + + /** Access the parameters */ + const DoglegParams& params() const { return params_; } + + /** Access the last state */ + const DoglegState& state() const { return state_; } + + /// @} + +protected: + DoglegParams params_; + DoglegState state_; + + /** Access the parameters (base class version) */ + virtual const NonlinearOptimizerParams& _params() const { return params_; } + + /** Access the state (base class version) */ + virtual const NonlinearOptimizerState& _state() const { return state_; } + + /** Internal function for computing a COLAMD ordering if no ordering is specified */ + DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const { + if(!params.ordering) + params.ordering = graph.orderingCOLAMD(values); + return params; + } +}; + /** Parameters for Levenberg-Marquardt optimization. Note that this parameters * class inherits from NonlinearOptimizerParams, which specifies the parameters * common to all nonlinear optimization algorithms. 
This class also contains @@ -56,88 +132,8 @@ public: class DoglegState : public SuccessiveLinearizationState { public: - double Delta; + double delta; }; -/** - * This class performs Dogleg nonlinear optimization - */ -class DoglegOptimizer : public SuccessiveLinearizationOptimizer { - -public: - - typedef boost::shared_ptr SharedParams; - typedef boost::shared_ptr SharedState; - typedef boost::shared_ptr shared_ptr; - - /// @name Standard interface - /// @{ - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. For convenience this - * version takes plain objects instead of shared pointers, but internally - * copies the objects. - * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - DoglegOptimizer(const NonlinearFactorGraph& graph, - const DoglegParams& params = DoglegParams()) : - SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new DoglegParams(params)) {} - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. For convenience this - * version takes plain objects instead of shared pointers, but internally - * copies the objects. - * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - DoglegOptimizer(const NonlinearFactorGraph& graph, const Ordering& ordering) : - SuccessiveLinearizationOptimizer(SharedGraph(new NonlinearFactorGraph(graph))), - params_(new DoglegParams()) { - params_->ordering = ordering; } - - /** Standard constructor, requires a nonlinear factor graph, initial - * variable assignments, and optimization parameters. - * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments - * @param params The optimization parameters - */ - DoglegOptimizer(const SharedGraph& graph, - const DoglegParams& params = DoglegParams()) : - SuccessiveLinearizationOptimizer(graph), - params_(new DoglegParams(params)) {} - - /** Access the parameters */ - virtual NonlinearOptimizer::SharedParams params() const { return params_; } - - /// @} - - /// @name Advanced interface - /// @{ - - /** Virtual destructor */ - virtual ~DoglegOptimizer() {} - - /** Perform a single iteration, returning a new NonlinearOptimizer class - * containing the updated variable assignments, which may be retrieved with - * values(). - */ - virtual NonlinearOptimizer::SharedState iterate(const NonlinearOptimizer::SharedState& current) const; - - /** Create an initial state with the specified variable assignment values and - * all other default state. - */ - virtual NonlinearOptimizer::SharedState initialState(const Values& initialValues) const; - - /// @} - -protected: - - SharedParams params_; -}; - } diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index 1528eb1df..6258366af 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -37,7 +37,7 @@ public: * version takes plain objects instead of shared pointers, but internally * copies the objects. 
* @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments + * @param initialValues The initial variable assignments * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, @@ -49,12 +49,12 @@ public: * version takes plain objects instead of shared pointers, but internally * copies the objects. * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments + * @param initialValues The initial variable assignments * @param params The optimization parameters */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : NonlinearOptimizer(graph), state_(graph, initialValues) { - params_.ordering = ordering; } + *params_.ordering = ordering; } /// @} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 0e641db29..fc25a697d 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -27,36 +27,23 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const NonlinearOptimizer::SharedState& _current) const { - - const LevenbergMarquardtState& current = dynamic_cast(*_current); +void LevenbergMarquardtOptimizer::iterate() const { // Linearize graph - const Ordering& ordering = this->ordering(current.values); - GaussianFactorGraph::shared_ptr linear = graph_->linearize(current.values, ordering); + GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, *params_.ordering); // Check whether to use QR bool useQR; - if(params_->factorization == LevenbergMarquardtParams::LDL) + if(params_.factorization == LevenbergMarquardtParams::LDL) useQR = false; - else if(params_->factorization == LevenbergMarquardtParams::QR) + else if(params_.factorization == LevenbergMarquardtParams::QR) useQR = true; else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization"); // Pull out parameters we'll use - const NonlinearOptimizerParams::Verbosity nloVerbosity = params_->verbosity; - const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_->lmVerbosity; - const double lambdaFactor = params_->lambdaFactor; - - // Variables to update during try_lambda loop - double lambda = current.lambda; - double next_error = current.error; - Values next_values = current.values; - - // Compute dimensions if we haven't done it yet - if(!dimensions_) - dimensions_.reset(new vector(current.values.dims(ordering))); + const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity; + const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_.lmVerbosity; // Keep increasing lambda until we make make progress while(true) { @@ -68,10 +55,10 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli GaussianFactorGraph dampedSystem(*linear); { double sigma = 1.0 / sqrt(lambda); - dampedSystem.reserve(dampedSystem.size() + dimensions_->size()); + dampedSystem.reserve(dampedSystem.size() + dimensions_.size()); // for each of the variables, add a prior - for(Index j=0; jsize(); ++j) { - size_t dim = (*dimensions_)[j]; + for(Index j=0; jelimination == LevenbergMarquardtParams::MULTIFRONTAL) + if(params_.elimination == LevenbergMarquardtParams::MULTIFRONTAL) delta = 
GaussianMultifrontalSolver(dampedSystem, useQR).optimize(); - else if(params_->elimination == LevenbergMarquardtParams::SEQUENTIAL) + else if(params_.elimination == LevenbergMarquardtParams::SEQUENTIAL) delta = GaussianSequentialSolver(dampedSystem, useQR).optimize(); else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); @@ -97,28 +84,28 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta"); // update values - Values newValues = current.values.retract(*delta, ordering); + Values newValues = state_.values.retract(*delta, *params_.ordering); // create new optimization state with more adventurous lambda - double error = graph_->error(newValues); + double error = graph_.error(newValues); if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl; - if(error <= current.error) { - next_values.swap(newValues); - next_error = error; - lambda /= lambdaFactor; + if(error <= state_.error) { + state_.values.swap(newValues); + state_.error = error; + state_.lambda /= params_.lambdaFactor; break; } else { // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. - if(lambda >= params_->lambdaUpperBound) { + if(state_.lambda >= params_.lambdaUpperBound) { if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; } else { - lambda *= lambdaFactor; + state_.lambda *= params_.lambdaFactor; } } } catch(const NegativeMatrixException& e) { @@ -127,34 +114,20 @@ NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::iterate(const Nonli // Either we're not cautious, or the same lambda was worse than the current error. // The more adventurous lambda was worse too, so make lambda more conservative // and keep the same values. - if(lambda >= params_->lambdaUpperBound) { + if(lambda >= params_.lambdaUpperBound) { if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; } else { - lambda *= lambdaFactor; + state_.lambda *= params_.lambdaFactor; } } catch(...) 
{ throw; } } // end while - // Create new state with new values and new error - LevenbergMarquardtOptimizer::SharedState newState = boost::make_shared(); - newState->values = next_values; - newState->error = next_error; - newState->iterations = current.iterations + 1; - newState->lambda = lambda; - - return newState; -} - -/* ************************************************************************* */ -NonlinearOptimizer::SharedState LevenbergMarquardtOptimizer::initialState(const Values& initialValues) const { - SharedState initial = boost::make_shared(); - defaultInitialState(initialValues, *initial); - initial->lambda = params_->lambdaInitial; - return initial; + // Increment the iteration counter + ++state_.iterations; } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index 5ce5f5f1f..b5b6d21f6 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -24,7 +24,6 @@ namespace gtsam { /** * This class performs Levenberg-Marquardt nonlinear optimization - * TODO: use make_shared */ class LevenbergMarquardtOptimizer : public NonlinearOptimizer { @@ -40,24 +39,24 @@ public: * version takes plain objects instead of shared pointers, but internally * copies the objects. * @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments + * @param initialValues The initial variable assignments * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : - NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {} + NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this * version takes plain objects instead of shared pointers, but internally * copies the objects. 
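A note on the damping loop in LevenbergMarquardtOptimizer::iterate above: with sigma = 1/sqrt(lambda), a unit-Jacobian prior on each variable contributes sum_j ||delta_j||^2 / sigma^2 = lambda * ||delta||^2 to the linearized objective, so the damped solve is the familiar (J^T J + lambda I) delta = -J^T r, realized here with extra factors instead of by editing the Hessian. This is the standard identity rather than anything stated in the patch; the exact construction of the added priors is elided in the hunk above, so the unit-Jacobian form is an inferred assumption.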
* @param graph The nonlinear factor graph to optimize - * @param values The initial variable assignments + * @param initialValues The initial variable assignments * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : - NonlinearOptimizer(graph), state_(graph, initialValues) { - params_.ordering = ordering; } + NonlinearOptimizer(graph), state_(graph, initialValues), dimensions_(initialValues.dims(ordering)) { + *params_.ordering = ordering; } /// @} @@ -74,21 +73,18 @@ public: virtual void iterate() const; /** Access the parameters */ - const GaussNewtonParams& params() const { return params_; } + const LevenbergMarquardtParams& params() const { return params_; } /** Access the last state */ - const NonlinearOptimizerState& state() const { return state_; } + const LevenbergMarquardtState& state() const { return state_; } /// @} protected: - typedef boost::shared_ptr > SharedDimensions; - - mutable SharedDimensions dimensions_; // Mutable because we compute it when needed and cache it - LevenbergMarquardtParams params_; LevenbergMarquardtState state_; + std::vector dimensions_; /** Access the parameters (base class version) */ virtual const NonlinearOptimizerParams& _params() const { return params_; } @@ -97,7 +93,7 @@ protected: virtual const NonlinearOptimizerState& _state() const { return state_; } /** Internal function for computing a COLAMD ordering if no ordering is specified */ - GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const { + LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const { if(!params.ordering) params.ordering = graph.orderingCOLAMD(values); return params; From 5f94e477a4459e675c3bc1a88d0f7409f788585d Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 18:32:54 +0000 Subject: [PATCH 43/57] Changes in progress --- gtsam/nonlinear/DoglegOptimizer.h | 76 +++++++-------- gtsam/nonlinear/GaussNewtonOptimizer.h | 12 +-- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 95 ++++++++++--------- 3 files changed, 92 insertions(+), 91 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index af9c91179..1c6f3cb6f 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -22,6 +22,44 @@ namespace gtsam { +/** Parameters for Levenberg-Marquardt optimization. Note that this parameters + * class inherits from NonlinearOptimizerParams, which specifies the parameters + * common to all nonlinear optimization algorithms. This class also contains + * all of those parameters. 
+ */ +class DoglegParams : public SuccessiveLinearizationParams { +public: + /** See DoglegParams::dlVerbosity */ + enum DLVerbosity { + SILENT, + VERBOSE + }; + + double deltaInitial; ///< The initial trust region radius (default: 1.0) + DLVerbosity dlVerbosity; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity + + DoglegParams() : + deltaInitial(1.0), dlVerbosity(SILENT) {} + + virtual ~DoglegParams() {} + + virtual void print(const std::string& str = "") const { + SuccessiveLinearizationParams::print(str); + std::cout << " deltaInitial: " << deltaInitial << "\n"; + std::cout.flush(); + } +}; + +/** + * State for DoglegOptimizer + */ +class DoglegState : public SuccessiveLinearizationState { +public: + + double delta; + +}; + /** * This class performs Dogleg nonlinear optimization */ @@ -98,42 +136,4 @@ protected: } }; -/** Parameters for Levenberg-Marquardt optimization. Note that this parameters - * class inherits from NonlinearOptimizerParams, which specifies the parameters - * common to all nonlinear optimization algorithms. This class also contains - * all of those parameters. - */ -class DoglegParams : public SuccessiveLinearizationParams { -public: - /** See DoglegParams::dlVerbosity */ - enum DLVerbosity { - SILENT, - VERBOSE - }; - - double deltaInitial; ///< The initial trust region radius (default: 1.0) - DLVerbosity dlVerbosity; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity - - DoglegParams() : - deltaInitial(1.0), dlVerbosity(SILENT) {} - - virtual ~DoglegParams() {} - - virtual void print(const std::string& str = "") const { - SuccessiveLinearizationParams::print(str); - std::cout << " deltaInitial: " << deltaInitial << "\n"; - std::cout.flush(); - } -}; - -/** - * State for DoglegOptimizer - */ -class DoglegState : public SuccessiveLinearizationState { -public: - - double delta; - -}; - } diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index 6258366af..afc0fe30d 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -22,6 +22,12 @@ namespace gtsam { +/** Parameters for Gauss-Newton optimization, inherits from + * NonlinearOptimizationParams. + */ +class GaussNewtonParams : public SuccessiveLinearizationParams { +}; + /** * This class performs Gauss-Newton nonlinear optimization */ @@ -98,10 +104,4 @@ protected: }; -/** Parameters for Gauss-Newton optimization, inherits from - * NonlinearOptimizationParams. - */ -class GaussNewtonParams : public SuccessiveLinearizationParams { -}; - } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index b5b6d21f6..c522d0a82 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -22,6 +22,52 @@ namespace gtsam { +/** Parameters for Levenberg-Marquardt optimization. Note that this parameters + * class inherits from NonlinearOptimizerParams, which specifies the parameters + * common to all nonlinear optimization algorithms. This class also contains + * all of those parameters. 
+ */ +class LevenbergMarquardtParams : public SuccessiveLinearizationParams { +public: + /** See LevenbergMarquardtParams::lmVerbosity */ + enum LMVerbosity { + SILENT, + LAMBDA, + TRYLAMBDA, + TRYCONFIG, + TRYDELTA, + DAMPED + }; + + double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5) + double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) + double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) + LMVerbosity lmVerbosity; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity + + LevenbergMarquardtParams() : + lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lmVerbosity(SILENT) {} + + virtual ~LevenbergMarquardtParams() {} + + virtual void print(const std::string& str = "") const { + SuccessiveLinearizationParams::print(str); + std::cout << " lambdaInitial: " << lambdaInitial << "\n"; + std::cout << " lambdaFactor: " << lambdaFactor << "\n"; + std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n"; + std::cout.flush(); + } +}; + +/** + * State for LevenbergMarquardtOptimizer + */ +class LevenbergMarquardtState : public NonlinearOptimizerState { +public: + + double lambda; + +}; + /** * This class performs Levenberg-Marquardt nonlinear optimization */ @@ -44,7 +90,8 @@ public: */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : - NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {} + NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), + state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -100,50 +147,4 @@ protected: } }; -/** Parameters for Levenberg-Marquardt optimization. Note that this parameters - * class inherits from NonlinearOptimizerParams, which specifies the parameters - * common to all nonlinear optimization algorithms. This class also contains - * all of those parameters. 
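Because LevenbergMarquardtParams now carries both the common knobs and the LM-specific ones, configuration is a matter of setting plain data members before constructing the optimizer. A sketch, with `graph` and `initial` assumed to be an existing NonlinearFactorGraph and Values, and the chosen values purely illustrative:

    LevenbergMarquardtParams params;
    params.maxIterations = 50;                                   // from NonlinearOptimizerParams
    params.relativeErrorTol = 1e-6;
    params.elimination = LevenbergMarquardtParams::MULTIFRONTAL; // from SuccessiveLinearizationParams
    params.factorization = LevenbergMarquardtParams::QR;
    params.lambdaInitial = 1e-4;                                 // LM-specific
    params.lambdaFactor = 5.0;
    params.lambdaUpperBound = 1e7;
    params.verbosity = NonlinearOptimizerParams::ERROR;          // print error each iteration
    LevenbergMarquardtOptimizer optimizer(graph, initial, params);
    Values result = optimizer.optimize();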
- */ -class LevenbergMarquardtParams : public SuccessiveLinearizationParams { -public: - /** See LevenbergMarquardtParams::lmVerbosity */ - enum LMVerbosity { - SILENT, - LAMBDA, - TRYLAMBDA, - TRYCONFIG, - TRYDELTA, - DAMPED - }; - - double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5) - double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) - double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) - LMVerbosity lmVerbosity; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity - - LevenbergMarquardtParams() : - lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lmVerbosity(SILENT) {} - - virtual ~LevenbergMarquardtParams() {} - - virtual void print(const std::string& str = "") const { - SuccessiveLinearizationParams::print(str); - std::cout << " lambdaInitial: " << lambdaInitial << "\n"; - std::cout << " lambdaFactor: " << lambdaFactor << "\n"; - std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n"; - std::cout.flush(); - } -}; - -/** - * State for LevenbergMarquardtOptimizer - */ -class LevenbergMarquardtState : public NonlinearOptimizerState { -public: - - double lambda; - -}; - } From 75bd1689dfa7056744178edea5a13c5d560c9552 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 19:10:02 +0000 Subject: [PATCH 44/57] Changes in progress --- gtsam/nonlinear/DoglegOptimizer.cpp | 12 +- gtsam/nonlinear/DoglegOptimizer.h | 31 +++- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 3 +- gtsam/nonlinear/GaussNewtonOptimizer.h | 20 ++- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 10 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 27 +++- gtsam/nonlinear/NonlinearOptimizer.cpp | 2 +- gtsam/nonlinear/NonlinearOptimizer.h | 144 +++++++++--------- gtsam/slam/pose2SLAM.h | 2 +- 9 files changed, 148 insertions(+), 103 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 9d52fb7c1..4eeac0296 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -27,11 +27,11 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -void DoglegOptimizer::iterate(void) const { +void DoglegOptimizer::iterate(void) { // Linearize graph const Ordering& ordering = *params_.ordering; - GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, ordering); + GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering); // Check whether to use QR bool useQR; @@ -43,18 +43,18 @@ void DoglegOptimizer::iterate(void) const { throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization"); // Pull out parameters we'll use - const bool dlVerbose = (params_->dlVerbosity > DoglegParams::SILENT); + const bool dlVerbose = (params_.dlVerbosity > DoglegParams::SILENT); // Do Dogleg iteration with either Multifrontal or Sequential elimination DoglegOptimizerImpl::IterationResult result; if(params_.elimination == DoglegParams::MULTIFRONTAL) { GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose); + result = DoglegOptimizerImpl::Iterate(state_.Delta, 
DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose); } else if(params_.elimination == DoglegParams::SEQUENTIAL) { GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose); + result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose); } else { throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination"); @@ -66,7 +66,7 @@ void DoglegOptimizer::iterate(void) const { // Create new state with new values and new error state_.values = state_.values.retract(result.dx_d, ordering); state_.error = result.f_error; - state_.delta = result.delta; + state_.Delta = result.Delta; ++state_.iterations; } diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 1c6f3cb6f..44813befa 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -22,6 +22,8 @@ namespace gtsam { +class DoglegOptimizer; + /** Parameters for Levenberg-Marquardt optimization. Note that this parameters * class inherits from NonlinearOptimizerParams, which specifies the parameters * common to all nonlinear optimization algorithms. This class also contains @@ -53,17 +55,26 @@ public: /** * State for DoglegOptimizer */ -class DoglegState : public SuccessiveLinearizationState { +class DoglegState : public NonlinearOptimizerState { public: - double delta; + double Delta; + DoglegState() {} + + virtual ~DoglegState() {} + +protected: + DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int interations = 0) : + NonlinearOptimizerState(graph, values, iterations), Delta(params.deltaInitial) {} + + friend class DoglegOptimizer; }; /** * This class performs Dogleg nonlinear optimization */ -class DoglegOptimizer : public SuccessiveLinearizationOptimizer { +class DoglegOptimizer : public NonlinearOptimizer { public: @@ -82,7 +93,7 @@ public: */ DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const DoglegParams& params = DoglegParams()) : - NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {} + NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues, params_) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -93,8 +104,9 @@ public: * @param params The optimization parameters */ DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : - NonlinearOptimizer(graph), state_(graph, initialValues) { - *params_.ordering = ordering; } + NonlinearOptimizer(graph) { + *params_.ordering = ordering; + state_ = DoglegState(graph, initialValues, params_); } /// @} @@ -108,7 +120,7 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). 
*/ - virtual void iterate() const; + virtual void iterate(); /** Access the parameters */ const DoglegParams& params() const { return params_; } @@ -116,6 +128,9 @@ public: /** Access the last state */ const DoglegState& state() const { return state_; } + /** Access the current trust region radius Delta */ + double Delta() const { return state_.Delta; } + /// @} protected: @@ -131,7 +146,7 @@ protected: /** Internal function for computing a COLAMD ordering if no ordering is specified */ DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const { if(!params.ordering) - params.ordering = graph.orderingCOLAMD(values); + params.ordering = *graph.orderingCOLAMD(values); return params; } }; diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index 02960f569..39a4d32d5 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -25,7 +25,7 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -void GaussNewtonOptimizer::iterate() const { +void GaussNewtonOptimizer::iterate() { const NonlinearOptimizerState& current = state_; @@ -54,7 +54,6 @@ void GaussNewtonOptimizer::iterate() const { if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); // Create new state with new values and new error - NonlinearOptimizerState newState; state_.values = current.values.retract(*delta, *params_.ordering); state_.error = graph_.error(state_.values); ++ state_.iterations; diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index afc0fe30d..f7c57c3e9 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -22,12 +22,22 @@ namespace gtsam { +class GaussNewtonOptimizer; + /** Parameters for Gauss-Newton optimization, inherits from * NonlinearOptimizationParams. */ class GaussNewtonParams : public SuccessiveLinearizationParams { }; +class GaussNewtonState : public NonlinearOptimizerState { +protected: + GaussNewtonState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : + NonlinearOptimizerState(graph, values, iterations) {} + + friend class GaussNewtonOptimizer; +}; + /** * This class performs Gauss-Newton nonlinear optimization */ @@ -48,7 +58,7 @@ public: */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const GaussNewtonParams& params = GaussNewtonParams()) : - NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {} + NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -74,20 +84,20 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). 
*/ - virtual void iterate() const; + virtual void iterate(); /** Access the parameters */ const GaussNewtonParams& params() const { return params_; } /** Access the last state */ - const NonlinearOptimizerState& state() const { return state_; } + const GaussNewtonState& state() const { return state_; } /// @} protected: GaussNewtonParams params_; - NonlinearOptimizerState state_; + GaussNewtonState state_; /** Access the parameters (base class version) */ virtual const NonlinearOptimizerParams& _params() const { return params_; } @@ -98,7 +108,7 @@ protected: /** Internal function for computing a COLAMD ordering if no ordering is specified */ GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const { if(!params.ordering) - params.ordering = graph.orderingCOLAMD(values); + params.ordering = *graph.orderingCOLAMD(values); return params; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index fc25a697d..50d87a0d3 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -27,10 +27,10 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -void LevenbergMarquardtOptimizer::iterate() const { +void LevenbergMarquardtOptimizer::iterate() { // Linearize graph - GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, *params_.ordering); + GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering); // Check whether to use QR bool useQR; @@ -48,13 +48,13 @@ void LevenbergMarquardtOptimizer::iterate() const { // Keep increasing lambda until we make make progress while(true) { if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) - cout << "trying lambda = " << lambda << endl; + cout << "trying lambda = " << state_.lambda << endl; // Add prior-factors // TODO: replace this dampening with a backsubstitution approach GaussianFactorGraph dampedSystem(*linear); { - double sigma = 1.0 / sqrt(lambda); + double sigma = 1.0 / sqrt(state_.lambda); dampedSystem.reserve(dampedSystem.size() + dimensions_.size()); // for each of the variables, add a prior for(Index j=0; j= params_.lambdaUpperBound) { + if(state_.lambda >= params_.lambdaUpperBound) { if(nloVerbosity >= NonlinearOptimizerParams::ERROR) cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; break; diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index c522d0a82..f2e2e4fc7 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -22,6 +22,8 @@ namespace gtsam { +class LevenbergMarquardtOptimizer; + /** Parameters for Levenberg-Marquardt optimization. Note that this parameters * class inherits from NonlinearOptimizerParams, which specifies the parameters * common to all nonlinear optimization algorithms. 
This class also contains @@ -66,6 +68,15 @@ public: double lambda; + LevenbergMarquardtState() {} + + virtual ~LevenbergMarquardtState() {} + +protected: + LevenbergMarquardtState(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params, unsigned int iterations = 0) : + NonlinearOptimizerState(graph, values, iterations), lambda(params.lambdaInitial) {} + + friend class LevenbergMarquardtOptimizer; }; /** @@ -90,8 +101,8 @@ public: */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) : - NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), - state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {} + NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), + state_(graph, initialValues, params_), dimensions_(initialValues.dims(*params_.ordering)) {} /** Standard constructor, requires a nonlinear factor graph, initial * variable assignments, and optimization parameters. For convenience this @@ -102,8 +113,12 @@ public: * @param params The optimization parameters */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : - NonlinearOptimizer(graph), state_(graph, initialValues), dimensions_(initialValues.dims(ordering)) { - *params_.ordering = ordering; } + NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) { + *params_.ordering = ordering; + state_ = LevenbergMarquardtState(graph, initialValues, params_); } + + /** Access the current damping value */ + double lambda() const { return state_.lambda; } /// @} @@ -117,7 +132,7 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual void iterate() const; + virtual void iterate(); /** Access the parameters */ const LevenbergMarquardtParams& params() const { return params_; } @@ -142,7 +157,7 @@ protected: /** Internal function for computing a COLAMD ordering if no ordering is specified */ LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const { if(!params.ordering) - params.ordering = graph.orderingCOLAMD(values); + params.ordering = *graph.orderingCOLAMD(values); return params; } }; diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index c80714d34..76d1fe573 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -27,7 +27,7 @@ namespace gtsam { /* ************************************************************************* */ void NonlinearOptimizer::defaultOptimize() { - const NonlinearOptimizerParams& params = this->params(); + const NonlinearOptimizerParams& params = this->_params(); double currentError = this->error(); // check if we're already close enough diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index f3f4abb92..388058ac0 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -22,6 +22,79 @@ namespace gtsam { +class NonlinearOptimizer; + +/** The common parameters for Nonlinear optimizers. Most optimizers + * deriving from NonlinearOptimizer also subclass the parameters. 
+ */ +class NonlinearOptimizerParams { +public: + /** See NonlinearOptimizerParams::verbosity */ + enum Verbosity { + SILENT, + ERROR, + VALUES, + DELTA, + LINEAR + }; + + size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) + double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) + double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) + double errorTol; ///< The maximum total error to stop iterating (default 0.0) + Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) + + NonlinearOptimizerParams() : + maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), + errorTol(0.0), verbosity(SILENT) {} + + virtual void print(const std::string& str = "") const { + std::cout << str << "\n"; + std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; + std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; + std::cout << " total error threshold: " << errorTol << "\n"; + std::cout << " maximum iterations: " << maxIterations << "\n"; + std::cout << " verbosity level: " << verbosity << std::endl; + } + + virtual ~NonlinearOptimizerParams() {} +}; + + +/** + * Base class for a nonlinear optimization state, including the current estimate + * of the variable values, error, and number of iterations. Optimizers derived + * from NonlinearOptimizer usually also define a derived state class containing + * additional state specific to the algorithm (for example, Dogleg state + * contains the current trust region radius). + */ +class NonlinearOptimizerState { +public: + + /** The current estimate of the variable values. */ + Values values; + + /** The factor graph error on the current values. */ + double error; + + /** The number of optimization iterations performed. */ + unsigned int iterations; + + NonlinearOptimizerState() {} + + /** Virtual destructor */ + virtual ~NonlinearOptimizerState() {} + +protected: + NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : + values(values), error(graph.error(values)), iterations(iterations) {} + + NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) : + values(values), error(error), iterations(iterations) {} + + friend class NonlinearOptimizer; +}; + /** * This is the abstract interface for classes that can optimize for the * maximum-likelihood estimate of a NonlinearFactorGraph. @@ -113,7 +186,7 @@ public: * process, you may call iterate() and check_convergence() yourself, and if * needed modify the optimization state between iterations. */ - virtual const Values& optimize() const { return defaultOptimize(); } + virtual const Values& optimize() { defaultOptimize(); return values(); } double error() const { return _state().error; } @@ -133,7 +206,7 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual void iterate() const = 0; + virtual void iterate() = 0; /// @} @@ -155,73 +228,6 @@ protected: }; - -/** The common parameters for Nonlinear optimizers. Most optimizers - * deriving from NonlinearOptimizer also subclass the parameters. 
- */ -class NonlinearOptimizerParams { -public: - /** See NonlinearOptimizerParams::verbosity */ - enum Verbosity { - SILENT, - ERROR, - VALUES, - DELTA, - LINEAR - }; - - size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) - double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) - double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) - double errorTol; ///< The maximum total error to stop iterating (default 0.0) - Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) - - NonlinearOptimizerParams() : - maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), - errorTol(0.0), verbosity(SILENT) {} - - virtual void print(const std::string& str = "") const { - std::cout << str << "\n"; - std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; - std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; - std::cout << " total error threshold: " << errorTol << "\n"; - std::cout << " maximum iterations: " << maxIterations << "\n"; - std::cout << " verbosity level: " << verbosity << std::endl; - } - - virtual ~NonlinearOptimizerParams() {} -}; - - -/** - * Base class for a nonlinear optimization state, including the current estimate - * of the variable values, error, and number of iterations. Optimizers derived - * from NonlinearOptimizer usually also define a derived state class containing - * additional state specific to the algorithm (for example, Dogleg state - * contains the current trust region radius). - */ -class NonlinearOptimizerState { -public: - - /** The current estimate of the variable values. */ - Values values; - - /** The factor graph error on the current values. */ - double error; - - /** The number of optimization iterations performed. */ - unsigned int iterations; - - NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : - values(values), error(graph.error(values)), iterations(iterations) {} - - NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) : - values(values), error(error), iterations(iterations) {} - - /** Virtual destructor */ - virtual ~NonlinearOptimizerState() {} -}; - /** Check whether the relative error decrease is less than relativeErrorTreshold, * the absolute error decrease is less than absoluteErrorTreshold, or * the error itself is less than errorThreshold. 
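A minimal usage sketch of the new interface defined above, assuming an existing NonlinearFactorGraph `graph` and Values `initial` (illustrative names only, not part of this patch); it follows the same pattern as the updated examples later in this series:

    // Configure the optimizer through its params struct; all fields have defaults.
    LevenbergMarquardtParams params;
    params.relativeErrorTol = 1e-15;                   // convergence thresholds now live in params
    params.absoluteErrorTol = 1e-15;
    params.ordering = *graph.orderingCOLAMD(initial);  // optional; a COLAMD ordering is computed if unset

    // Construct with graph, initial values, and params, then optimize.
    LevenbergMarquardtOptimizer optimizer(graph, initial, params);
    Values result = optimizer.optimize();              // or step manually with optimizer.iterate()
    double finalError = optimizer.error();             // error at the optimizer's current state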
diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index ccae01ebd..42bd8758c 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -97,7 +97,7 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); + return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize(); } }; From 7f0881f2e4023a661ded2d6ce51d7f9cb430c666 Mon Sep 17 00:00:00 2001 From: Stephen Williams Date: Mon, 14 May 2012 20:25:20 +0000 Subject: [PATCH 45/57] Updated examples and namespaces for the new NonlinearOptimizer interface --- examples/CameraResectioning.cpp | 2 +- examples/PlanarSLAMExample_easy.cpp | 2 +- examples/PlanarSLAMSelfContained_advanced.cpp | 42 +++++++++--------- examples/Pose2SLAMExample_advanced.cpp | 44 ++++++++++--------- examples/Pose2SLAMExample_easy.cpp | 2 +- examples/SimpleRotation.cpp | 2 +- examples/vSLAMexample/vSFMexample.cpp | 10 ++--- gtsam/slam/planarSLAM.h | 2 +- 8 files changed, 54 insertions(+), 52 deletions(-) diff --git a/examples/CameraResectioning.cpp b/examples/CameraResectioning.cpp index bbbb454f4..7d0e37e7f 100644 --- a/examples/CameraResectioning.cpp +++ b/examples/CameraResectioning.cpp @@ -95,7 +95,7 @@ int main(int argc, char* argv[]) { 0.,0.,-1.), Point3(0.,0.,2.0))); /* 4. Optimize the graph using Levenberg-Marquardt*/ - Values result = *LevenbergMarquardtOptimizer(graph, initial).optimized(); + Values result = LevenbergMarquardtOptimizer(graph, initial).optimize(); result.print("Final result: "); return 0; diff --git a/examples/PlanarSLAMExample_easy.cpp b/examples/PlanarSLAMExample_easy.cpp index b660a1c16..912589080 100644 --- a/examples/PlanarSLAMExample_easy.cpp +++ b/examples/PlanarSLAMExample_easy.cpp @@ -83,7 +83,7 @@ int main(int argc, char** argv) { initialEstimate.print("initial estimate"); // optimize using Levenberg-Marquardt optimization with an ordering from colamd - planarSLAM::Values result = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimized(); + planarSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize(); result.print("final result"); return 0; diff --git a/examples/PlanarSLAMSelfContained_advanced.cpp b/examples/PlanarSLAMSelfContained_advanced.cpp index 46f81d94d..a7a45b726 100644 --- a/examples/PlanarSLAMSelfContained_advanced.cpp +++ b/examples/PlanarSLAMSelfContained_advanced.cpp @@ -57,14 +57,14 @@ int main(int argc, char** argv) { Symbol l1('l',1), l2('l',2); // create graph container and add factors to it - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); + NonlinearFactorGraph graph; /* add prior */ // gaussian for prior SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin PriorFactor posePrior(x1, prior_measurement, prior_model); // create the factor - graph->add(posePrior); // add the factor to the graph + graph.add(posePrior); // add the factor to the graph /* add odometry */ // general noisemodel for odometry @@ -73,8 +73,8 @@ int main(int argc, char** argv) { // create between factors to represent odometry BetweenFactor odom12(x1, x2, odom_measurement, odom_model); BetweenFactor odom23(x2, x3, odom_measurement, odom_model); - graph->add(odom12); // add both to graph - graph->add(odom23); + graph.add(odom12); // add both to graph + graph.add(odom23); /* add measurements */ // general noisemodel for measurements @@ -94,39 +94,39 @@ int main(int argc, char** argv) 
{ BearingRangeFactor meas32(x3, l2, bearing32, range32, meas_model); // add the factors - graph->add(meas11); - graph->add(meas21); - graph->add(meas32); + graph.add(meas11); + graph.add(meas21); + graph.add(meas32); - graph->print("Full Graph"); + graph.print("Full Graph"); // initialize to noisy points - boost::shared_ptr initial(new Values); - initial->insert(x1, Pose2(0.5, 0.0, 0.2)); - initial->insert(x2, Pose2(2.3, 0.1,-0.2)); - initial->insert(x3, Pose2(4.1, 0.1, 0.1)); - initial->insert(l1, Point2(1.8, 2.1)); - initial->insert(l2, Point2(4.1, 1.8)); + Values initial; + initial.insert(x1, Pose2(0.5, 0.0, 0.2)); + initial.insert(x2, Pose2(2.3, 0.1,-0.2)); + initial.insert(x3, Pose2(4.1, 0.1, 0.1)); + initial.insert(l1, Point2(1.8, 2.1)); + initial.insert(l2, Point2(4.1, 1.8)); - initial->print("initial estimate"); + initial.print("initial estimate"); // optimize using Levenberg-Marquardt optimization with an ordering from colamd // first using sequential elimination LevenbergMarquardtParams lmParams; lmParams.elimination = LevenbergMarquardtParams::SEQUENTIAL; - Values::const_shared_ptr resultSequential = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimized(); - resultSequential->print("final result (solved with a sequential solver)"); + Values resultSequential = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimize(); + resultSequential.print("final result (solved with a sequential solver)"); // then using multifrontal, advanced interface // Note that we keep the original optimizer object so we can use the COLAMD // ordering it computes. LevenbergMarquardtOptimizer optimizer(graph, initial); - Values::const_shared_ptr resultMultifrontal = optimizer.optimized(); - resultMultifrontal->print("final result (solved with a multifrontal solver)"); + Values resultMultifrontal = optimizer.optimize(); + resultMultifrontal.print("final result (solved with a multifrontal solver)"); - const Ordering& ordering = *optimizer.ordering(); - GaussianMultifrontalSolver linearSolver(*graph->linearize(*resultMultifrontal, ordering)); + const Ordering& ordering = *optimizer.params().ordering; + GaussianMultifrontalSolver linearSolver(*graph.linearize(resultMultifrontal, ordering)); // Print marginals covariances for all variables print(linearSolver.marginalCovariance(ordering[x1]), "x1 covariance"); diff --git a/examples/Pose2SLAMExample_advanced.cpp b/examples/Pose2SLAMExample_advanced.cpp index 9cc8c2040..282287827 100644 --- a/examples/Pose2SLAMExample_advanced.cpp +++ b/examples/Pose2SLAMExample_advanced.cpp @@ -34,13 +34,13 @@ using namespace pose2SLAM; int main(int argc, char** argv) { /* 1. 
create graph container and add factors to it */ - shared_ptr graph(new Graph); + Graph graph; /* 2.a add prior */ // gaussian for prior SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin - graph->addPrior(1, prior_measurement, prior_model); // add directly to graph + graph.addPrior(1, prior_measurement, prior_model); // add directly to graph /* 2.b add odometry */ // general noisemodel for odometry @@ -48,36 +48,38 @@ int main(int argc, char** argv) { /* Pose2 measurements take (x,y,theta), where theta is taken from the positive x-axis*/ Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) - graph->addOdometry(1, 2, odom_measurement, odom_model); - graph->addOdometry(2, 3, odom_measurement, odom_model); - graph->print("full graph"); + graph.addOdometry(1, 2, odom_measurement, odom_model); + graph.addOdometry(2, 3, odom_measurement, odom_model); + graph.print("full graph"); /* 3. Create the data structure to hold the initial estimate to the solution * initialize to noisy points */ - shared_ptr initial(new pose2SLAM::Values); - initial->insertPose(1, Pose2(0.5, 0.0, 0.2)); - initial->insertPose(2, Pose2(2.3, 0.1,-0.2)); - initial->insertPose(3, Pose2(4.1, 0.1, 0.1)); - initial->print("initial estimate"); + pose2SLAM::Values initial; + initial.insertPose(1, Pose2(0.5, 0.0, 0.2)); + initial.insertPose(2, Pose2(2.3, 0.1,-0.2)); + initial.insertPose(3, Pose2(4.1, 0.1, 0.1)); + initial.print("initial estimate"); /* 4.2.1 Alternatively, you can go through the process step by step * Choose an ordering */ - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial); + Ordering ordering = *graph.orderingCOLAMD(initial); /* 4.2.2 set up solver and optimize */ - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - Optimizer optimizer(graph, initial, ordering, params); - Optimizer optimizer_result = optimizer.levenbergMarquardt(); + LevenbergMarquardtParams params; + params.absoluteErrorTol = 1e-15; + params.relativeErrorTol = 1e-15; + params.ordering = ordering; + LevenbergMarquardtOptimizer optimizer(graph, initial, params); - pose2SLAM::Values result = *optimizer_result.values(); + pose2SLAM::Values result = optimizer.optimize(); result.print("final result"); - /* Get covariances */ - Matrix covariance1 = optimizer_result.marginalCovariance(PoseKey(1)); - Matrix covariance2 = optimizer_result.marginalCovariance(PoseKey(2)); - - print(covariance1, "Covariance1"); - print(covariance2, "Covariance2"); +// /* Get covariances */ +// Matrix covariance1 = optimizer_result.marginalCovariance(PoseKey(1)); +// Matrix covariance2 = optimizer_result.marginalCovariance(PoseKey(2)); +// +// print(covariance1, "Covariance1"); +// print(covariance2, "Covariance2"); return 0; } diff --git a/examples/Pose2SLAMExample_easy.cpp b/examples/Pose2SLAMExample_easy.cpp index 4a4b7aa4f..18a756aba 100644 --- a/examples/Pose2SLAMExample_easy.cpp +++ b/examples/Pose2SLAMExample_easy.cpp @@ -61,7 +61,7 @@ int main(int argc, char** argv) { /* 4 Single Step Optimization * optimize using Levenberg-Marquardt optimization with an ordering from colamd */ - pose2SLAM::Values result = *LevenbergMarquardtOptimizer(graph, initial).optimized(); + pose2SLAM::Values result = graph.optimize(initial); result.print("final result"); diff --git a/examples/SimpleRotation.cpp b/examples/SimpleRotation.cpp index f3722da79..603d345bf 
100644 --- a/examples/SimpleRotation.cpp +++ b/examples/SimpleRotation.cpp @@ -105,7 +105,7 @@ int main() { * initial estimate. This will yield a new RotValues structure * with the final state of the optimization. */ - Values result = *LevenbergMarquardtOptimizer(graph, initial).optimized(); + Values result = LevenbergMarquardtOptimizer(graph, initial).optimize(); result.print("final result"); return 0; diff --git a/examples/vSLAMexample/vSFMexample.cpp b/examples/vSLAMexample/vSFMexample.cpp index 076430858..6edd06465 100644 --- a/examples/vSLAMexample/vSFMexample.cpp +++ b/examples/vSLAMexample/vSFMexample.cpp @@ -129,23 +129,23 @@ int main(int argc, char* argv[]) { readAllData(); // Create a graph using the 2D measurements (features) and the calibration data - boost::shared_ptr graph(new Graph(setupGraph(g_measurements, measurementSigma, g_calib))); + Graph graph(setupGraph(g_measurements, measurementSigma, g_calib)); // Create an initial Values structure using groundtruth values as the initial estimates - boost::shared_ptr initialEstimates(new Values(initialize(g_landmarks, g_poses))); + Values initialEstimates(initialize(g_landmarks, g_poses)); cout << "*******************************************************" << endl; - initialEstimates->print("INITIAL ESTIMATES: "); + initialEstimates.print("INITIAL ESTIMATES: "); // Add prior factor for all poses in the graph map::iterator poseit = g_poses.begin(); for (; poseit != g_poses.end(); poseit++) - graph->addPosePrior(poseit->first, poseit->second, noiseModel::Unit::Create(1)); + graph.addPosePrior(poseit->first, poseit->second, noiseModel::Unit::Create(1)); // Optimize the graph cout << "*******************************************************" << endl; LevenbergMarquardtParams params; params.lmVerbosity = LevenbergMarquardtParams::DAMPED; - visualSLAM::Values result = *LevenbergMarquardtOptimizer(graph, initialEstimates, params).optimized(); + visualSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimates, params).optimize(); // Print final results cout << "*******************************************************" << endl; diff --git a/gtsam/slam/planarSLAM.h b/gtsam/slam/planarSLAM.h index 321371d41..06994d8bd 100644 --- a/gtsam/slam/planarSLAM.h +++ b/gtsam/slam/planarSLAM.h @@ -110,7 +110,7 @@ namespace planarSLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); + return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize(); } }; From 4b541e1f62958198ec497bc223962cfef7e13fc9 Mon Sep 17 00:00:00 2001 From: Stephen Williams Date: Mon, 14 May 2012 21:07:56 +0000 Subject: [PATCH 46/57] Unit tests now compile with new NonlinearOptimizer --- gtsam/slam/tests/testGeneralSFMFactor.cpp | 29 +++-- .../testGeneralSFMFactor_Cal3Bundler.cpp | 25 +++-- gtsam/slam/tests/testPose2SLAM.cpp | 6 +- gtsam/slam/tests/testPose3SLAM.cpp | 6 +- gtsam/slam/tests/testStereoFactor.cpp | 13 +-- gtsam/slam/tests/testVSLAM.cpp | 33 +++--- tests/testBoundingConstraint.cpp | 4 +- tests/testNonlinearEquality.cpp | 16 +-- tests/testNonlinearOptimizer.cpp | 105 ++++++++---------- tests/testRot3Optimization.cpp | 2 +- 10 files changed, 116 insertions(+), 123 deletions(-) diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index 887f6b86c..88f4dc65e 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -175,8 +175,9 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { 
// Create an ordering of the variables Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * 1e-5 * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -217,8 +218,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { const double reproj_error = 1e-5; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -257,8 +259,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -313,8 +316,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -356,8 +360,9 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -380,7 +385,7 @@ TEST(GeneralSFMFactor, GeneralCameraPoseRange) { LevenbergMarquardtParams params; params.absoluteErrorTol = 1e-9; params.relativeErrorTol = 1e-9; - Values actual = LevenbergMarquardtOptimizer(graph, params).optimized(init); + Values actual = LevenbergMarquardtOptimizer(graph, init, params).optimize(); EXPECT(assert_equal(expected, actual, 1e-4)); } @@ -404,7 +409,7 @@ TEST(GeneralSFMFactor, CalibratedCameraPoseRange) { LevenbergMarquardtParams params; params.absoluteErrorTol = 1e-9; params.relativeErrorTol = 1e-9; - Values actual = LevenbergMarquardtOptimizer(graph, params).optimized(init); + Values actual = LevenbergMarquardtOptimizer(graph, init, params).optimize(); EXPECT(assert_equal(expected, actual, 1e-4)); } diff --git 
a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index ed7c35663..8a6c3b575 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -176,8 +176,9 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // Create an ordering of the variables Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * 1e-5 * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -218,8 +219,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { const double reproj_error = 1e-5; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -258,8 +260,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -310,8 +313,9 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -353,8 +357,9 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { const double reproj_error = 1e-5 ; Ordering ordering = *getOrdering(X,L); - NonlinearOptimizer::SharedState final = LevenbergMarquardtOptimizer(graph, ordering).optimize(values); - EXPECT(final->error < 0.5 * reproj_error * nMeasurements); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testPose2SLAM.cpp b/gtsam/slam/tests/testPose2SLAM.cpp index 5555b03b3..22c02ab52 100644 --- a/gtsam/slam/tests/testPose2SLAM.cpp +++ b/gtsam/slam/tests/testPose2SLAM.cpp @@ -164,7 +164,7 @@ TEST(Pose2Graph, optimize) { LevenbergMarquardtParams params; params.relativeErrorTol = 1e-15; params.ordering = ordering; - Values actual = LevenbergMarquardtOptimizer(fg, params).optimized(initial); + Values actual = LevenbergMarquardtOptimizer(fg, 
initial, params).optimize(); // Check with expected config Values expected; @@ -203,7 +203,7 @@ TEST(Pose2Graph, optimizeThreePoses) { LevenbergMarquardtParams params; params.relativeErrorTol = 1e-15; params.ordering = ordering; - Values actual = LevenbergMarquardtOptimizer(fg, params).optimized(initial); + Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize(); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); @@ -245,7 +245,7 @@ TEST_UNSAFE(Pose2SLAM, optimizeCircle) { LevenbergMarquardtParams params; params.relativeErrorTol = 1e-15; params.ordering = ordering; - Values actual = LevenbergMarquardtOptimizer(fg, params).optimized(initial); + Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize(); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); diff --git a/gtsam/slam/tests/testPose3SLAM.cpp b/gtsam/slam/tests/testPose3SLAM.cpp index dac562267..688dcc7b9 100644 --- a/gtsam/slam/tests/testPose3SLAM.cpp +++ b/gtsam/slam/tests/testPose3SLAM.cpp @@ -78,7 +78,7 @@ TEST(Pose3Graph, optimizeCircle) { Ordering ordering; ordering += kx0,kx1,kx2,kx3,kx4,kx5; - Values actual = LevenbergMarquardtOptimizer(fg, ordering).optimized(initial); + Values actual = LevenbergMarquardtOptimizer(fg, initial, ordering).optimize(); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); @@ -113,7 +113,7 @@ TEST(Pose3Graph, partial_prior_height) { // linearization EXPECT_DOUBLES_EQUAL(2.0, height.error(values), tol); - Values actual = LevenbergMarquardtOptimizer(graph).optimized(values); + Values actual = LevenbergMarquardtOptimizer(graph, values).optimize(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } @@ -165,7 +165,7 @@ TEST(Pose3Graph, partial_prior_xy) { Values values; values.insert(key, init); - Values actual = LevenbergMarquardtOptimizer(graph).optimized(values); + Values actual = LevenbergMarquardtOptimizer(graph, values).optimize(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } diff --git a/gtsam/slam/tests/testStereoFactor.cpp b/gtsam/slam/tests/testStereoFactor.cpp index bfc93fbaa..25cd6e16f 100644 --- a/gtsam/slam/tests/testStereoFactor.cpp +++ b/gtsam/slam/tests/testStereoFactor.cpp @@ -65,20 +65,19 @@ TEST( StereoFactor, singlePoint) Point3 l1(0, 0, 0); values.insert(PointKey(1), l1); // add point at origin; - GaussNewtonOptimizer optimizer(graph); - NonlinearOptimizer::SharedState initial = optimizer.initialState(values); + GaussNewtonOptimizer optimizer(graph, values); // We expect the initial to be zero because config is the ground truth - DOUBLES_EQUAL(0.0, initial->error, 1e-9); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed - NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); - DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); + optimizer.iterate(); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Complete solution - NonlinearOptimizer::SharedState final = optimizer.optimize(initial); + optimizer.optimize(); - DOUBLES_EQUAL(0.0, final->error, 1e-6); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-6); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testVSLAM.cpp b/gtsam/slam/tests/testVSLAM.cpp index 0c42772de..bb7623954 100644 --- a/gtsam/slam/tests/testVSLAM.cpp +++ b/gtsam/slam/tests/testVSLAM.cpp @@ -102,17 
+102,16 @@ TEST( Graph, optimizeLM) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - const NonlinearOptimizer& optimizer = LevenbergMarquardtOptimizer(graph, ordering); - NonlinearOptimizer::SharedState initial = optimizer.initialState(initialEstimate); - DOUBLES_EQUAL(0.0, initial->error, 1e-9); + LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, ordering); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); - DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); + optimizer.iterate(); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // check if correct - CHECK(assert_equal(initialEstimate, afterOneIteration->values)); + CHECK(assert_equal(initialEstimate, optimizer.values())); } @@ -140,17 +139,16 @@ TEST( Graph, optimizeLM2) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - LevenbergMarquardtOptimizer optimizer(graph, ordering); - NonlinearOptimizer::SharedState initial = optimizer.initialState(initialEstimate); - DOUBLES_EQUAL(0.0, initial->error, 1e-9); + LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, ordering); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); - DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); + optimizer.iterate(); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // check if correct - CHECK(assert_equal(initialEstimate, afterOneIteration->values)); + CHECK(assert_equal(initialEstimate, optimizer.values())); } @@ -174,17 +172,16 @@ TEST( Graph, CHECK_ORDERING) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - LevenbergMarquardtOptimizer optimizer(graph); - NonlinearOptimizer::SharedState initial = optimizer.initialState(initialEstimate); - DOUBLES_EQUAL(0.0, initial->error, 1e-9); + LevenbergMarquardtOptimizer optimizer(graph, initialEstimate); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - NonlinearOptimizer::SharedState afterOneIteration = optimizer.iterate(initial); - DOUBLES_EQUAL(0.0, afterOneIteration->error, 1e-9); + optimizer.iterate(); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // check if correct - CHECK(assert_equal(initialEstimate, afterOneIteration->values)); + CHECK(assert_equal(initialEstimate, optimizer.values())); } /* ************************************************************************* */ diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index b9230705a..e1b685fc8 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -154,7 +154,7 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { Values initValues; initValues.insert(x1, start_pt); - Values actual = LevenbergMarquardtOptimizer(graph).optimized(initValues); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, actual, tol)); @@ -176,7 +176,7 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { Values initValues; initValues.insert(x1, 
start_pt); - Values actual = LevenbergMarquardtOptimizer(graph).optimized(initValues); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, actual, tol)); diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index ce86e71bc..94ab58831 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -196,12 +196,12 @@ TEST ( NonlinearEquality, allow_error_optimize ) { // optimize Ordering ordering; ordering.push_back(key1); - NonlinearOptimizer::auto_ptr result = LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); + Values result = LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); // verify Values expected; expected.insert(key1, feasible1); - EXPECT(assert_equal(expected, *result->values())); + EXPECT(assert_equal(expected, result)); } /* ************************************************************************* */ @@ -230,7 +230,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { // optimize Ordering ordering; ordering.push_back(key1); - Values actual = *LevenbergMarquardtOptimizer(graph, init, ordering).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); // verify Values expected; @@ -317,7 +317,7 @@ TEST( testNonlinearEqualityConstraint, unary_simple_optimization ) { EXPECT(constraint->active(expected)); EXPECT_DOUBLES_EQUAL(0.0, constraint->error(expected), tol); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); EXPECT(assert_equal(expected, actual, tol)); } @@ -408,7 +408,7 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) { initValues.insert(key1, Point2()); initValues.insert(key2, badPt); - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); Values expected; expected.insert(key1, truth_pt1); expected.insert(key2, truth_pt2); @@ -447,7 +447,7 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { initialEstimate.insert(l1, Point2(1.0, 6.0)); // ground truth initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame - Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize(); Values expected; expected.insert(x1, pt_x1); @@ -491,7 +491,7 @@ TEST (testNonlinearEqualityConstraint, map_warp ) { initialEstimate.insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin // optimize - Values actual = *LevenbergMarquardtOptimizer(graph, initialEstimate).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize(); Values expected; expected.insert(x1, Point2(1.0, 1.0)); @@ -557,7 +557,7 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { initValues.insert(l2, landmark2); // optimize - Values actual = *LevenbergMarquardtOptimizer(graph, initValues).optimized(); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); // create config Values truthValues; diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 4e35c0f77..e1088683e 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -46,105 +46,91 @@ Key kl(size_t i) { return Symbol('l',i); } TEST( NonlinearOptimizer, iterateLM ) { // 
really non-linear factor graph - shared_ptr fg(new example::Graph( - example::createReallyNonlinearFactorGraph())); + example::Graph fg(example::createReallyNonlinearFactorGraph()); // config far from minimum Point2 x0(3,0); - boost::shared_ptr config(new Values); - config->insert(simulated2D::PoseKey(1), x0); - - // ordering - shared_ptr ord(new Ordering()); - ord->push_back(kx(1)); - - // create initial optimization state, with lambda=0 - NonlinearOptimizer::auto_ptr optimizer = LevenbergMarquardtOptimizer(fg, config, LevenbergMarquardtParams(), ord).update(0.0); + Values config; + config.insert(simulated2D::PoseKey(1), x0); // normal iterate - NonlinearOptimizer::auto_ptr iterated1 = GaussNewtonOptimizer(fg, config, GaussNewtonParams(), ord).iterate(); + GaussNewtonOptimizer gnOptimizer(fg, config); + gnOptimizer.iterate(); // LM iterate with lambda 0 should be the same - NonlinearOptimizer::auto_ptr iterated2 = LevenbergMarquardtOptimizer(fg, config, LevenbergMarquardtParams(), ord).update(0.0)->iterate(); + LevenbergMarquardtOptimizer lmOptimizer(fg, config); + lmOptimizer.iterate(); - CHECK(assert_equal(*iterated1->values(), *iterated2->values(), 1e-9)); + CHECK(assert_equal(gnOptimizer.values(), lmOptimizer.values(), 1e-9)); } /* ************************************************************************* */ TEST( NonlinearOptimizer, optimize ) { - shared_ptr fg(new example::Graph( - example::createReallyNonlinearFactorGraph())); + example::Graph fg(example::createReallyNonlinearFactorGraph()); // test error at minimum Point2 xstar(0,0); Values cstar; cstar.insert(simulated2D::PoseKey(1), xstar); - DOUBLES_EQUAL(0.0,fg->error(cstar),0.0); + DOUBLES_EQUAL(0.0,fg.error(cstar),0.0); // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = Point2 x0(3,3); - boost::shared_ptr c0(new Values); - c0->insert(simulated2D::PoseKey(1), x0); - DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3); - - // optimize parameters - shared_ptr ord(new Ordering()); - ord->push_back(kx(1)); + Values c0; + c0.insert(simulated2D::PoseKey(1), x0); + DOUBLES_EQUAL(199.0,fg.error(c0),1e-3); // Gauss-Newton - NonlinearOptimizer::auto_ptr actual1 = GaussNewtonOptimizer(fg, c0, GaussNewtonParams(), ord).optimize(); - DOUBLES_EQUAL(0,fg->error(*(actual1->values())),tol); + Values actual1 = GaussNewtonOptimizer(fg, c0).optimize(); + DOUBLES_EQUAL(0,fg.error(actual1),tol); // Levenberg-Marquardt - NonlinearOptimizer::auto_ptr actual2 = LevenbergMarquardtOptimizer(fg, c0, LevenbergMarquardtParams(), ord).optimize(); - DOUBLES_EQUAL(0,fg->error(*(actual2->values())),tol); + Values actual2 = LevenbergMarquardtOptimizer(fg, c0).optimize(); + DOUBLES_EQUAL(0,fg.error(actual2),tol); // Dogleg - NonlinearOptimizer::auto_ptr actual3 = DoglegOptimizer(fg, c0, DoglegParams(), ord).optimize(); - DOUBLES_EQUAL(0,fg->error(*(actual2->values())),tol); + Values actual3 = DoglegOptimizer(fg, c0).optimize(); + DOUBLES_EQUAL(0,fg.error(actual3),tol); } /* ************************************************************************* */ TEST( NonlinearOptimizer, SimpleLMOptimizer ) { - shared_ptr fg(new example::Graph( - example::createReallyNonlinearFactorGraph())); + example::Graph fg(example::createReallyNonlinearFactorGraph()); Point2 x0(3,3); - boost::shared_ptr c0(new Values); - c0->insert(simulated2D::PoseKey(1), x0); + Values c0; + c0.insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = LevenbergMarquardtOptimizer(fg, c0).optimized(); - DOUBLES_EQUAL(0,fg->error(*actual),tol); + Values actual = 
LevenbergMarquardtOptimizer(fg, c0).optimize(); + DOUBLES_EQUAL(0,fg.error(actual),tol); } /* ************************************************************************* */ TEST( NonlinearOptimizer, SimpleGNOptimizer ) { - shared_ptr fg(new example::Graph( - example::createReallyNonlinearFactorGraph())); + example::Graph fg(example::createReallyNonlinearFactorGraph()); - Point2 x0(3,3); - boost::shared_ptr c0(new Values); - c0->insert(simulated2D::PoseKey(1), x0); + Point2 x0(3,3); + Values c0; + c0.insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = GaussNewtonOptimizer(fg, c0).optimized(); - DOUBLES_EQUAL(0,fg->error(*actual),tol); + Values actual = GaussNewtonOptimizer(fg, c0).optimize(); + DOUBLES_EQUAL(0,fg.error(actual),tol); } /* ************************************************************************* */ TEST( NonlinearOptimizer, SimpleDLOptimizer ) { - shared_ptr fg(new example::Graph( - example::createReallyNonlinearFactorGraph())); + example::Graph fg(example::createReallyNonlinearFactorGraph()); Point2 x0(3,3); - boost::shared_ptr c0(new Values); - c0->insert(simulated2D::PoseKey(1), x0); + Values c0; + c0.insert(simulated2D::PoseKey(1), x0); - Values::const_shared_ptr actual = DoglegOptimizer(fg, c0).optimized(); - DOUBLES_EQUAL(0,fg->error(*actual),tol); + Values actual = DoglegOptimizer(fg, c0).optimize(); + DOUBLES_EQUAL(0,fg.error(actual),tol); } /* ************************************************************************* */ @@ -161,10 +147,10 @@ TEST( NonlinearOptimizer, optimization_method ) Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Values actualMFQR = *LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimized(); + Values actualMFQR = LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize(); DOUBLES_EQUAL(0,fg.error(actualMFQR),tol); - Values actualMFLDL = *LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimized(); + Values actualMFLDL = LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize(); DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol); } @@ -183,12 +169,13 @@ TEST( NonlinearOptimizer, Factorization ) ordering.push_back(pose2SLAM::PoseKey(1)); ordering.push_back(pose2SLAM::PoseKey(2)); - NonlinearOptimizer::auto_ptr optimized = LevenbergMarquardtOptimizer(graph, config, ordering).iterate(); + LevenbergMarquardtOptimizer optimizer(graph, config, ordering); + optimizer.iterate(); Values expected; expected.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.)); expected.insert(pose2SLAM::PoseKey(2), Pose2(1.,0.,0.)); - CHECK(assert_equal(expected, *optimized->values(), 1e-5)); + CHECK(assert_equal(expected, optimizer.values(), 1e-5)); } /* ************************************************************************* */ @@ -216,16 +203,16 @@ TEST(NonlinearOptimizer, NullFactor) { ord.push_back(kx(1)); // Gauss-Newton - NonlinearOptimizer::auto_ptr actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize(); - DOUBLES_EQUAL(0,fg.error(*actual1->values()),tol); + Values actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize(); + DOUBLES_EQUAL(0,fg.error(actual1),tol); // Levenberg-Marquardt - NonlinearOptimizer::auto_ptr actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize(); - DOUBLES_EQUAL(0,fg.error(*actual2->values()),tol); + Values actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize(); + DOUBLES_EQUAL(0,fg.error(actual2),tol); // Dogleg - NonlinearOptimizer::auto_ptr actual3 = DoglegOptimizer(fg, c0, ord).optimize(); - DOUBLES_EQUAL(0,fg.error(*actual2->values()),tol); + Values actual3 = DoglegOptimizer(fg, c0, ord).optimize(); + 
DOUBLES_EQUAL(0,fg.error(actual3),tol); } /* ************************************************************************* */ diff --git a/tests/testRot3Optimization.cpp b/tests/testRot3Optimization.cpp index 1c90f4f48..df2471933 100644 --- a/tests/testRot3Optimization.cpp +++ b/tests/testRot3Optimization.cpp @@ -47,7 +47,7 @@ TEST(Rot3, optimize) { fg.add(Between(Symbol('r',j), Symbol('r',(j+1)%6), Rot3::Rz(M_PI/3.0), sharedSigma(3, 0.01))); } - Values final = *GaussNewtonOptimizer(fg, initial).optimized(); + Values final = GaussNewtonOptimizer(fg, initial).optimize(); EXPECT(assert_equal(truth, final, 1e-5)); } From 6bd94352cacc79c320a6c213e6d08818d0273455 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 21:33:02 +0000 Subject: [PATCH 47/57] Removed inactive code --- gtsam/nonlinear/Values-inl.h | 46 ------------------------------------ 1 file changed, 46 deletions(-) diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h index a31e100ef..40e95188e 100644 --- a/gtsam/nonlinear/Values-inl.h +++ b/gtsam/nonlinear/Values-inl.h @@ -40,31 +40,6 @@ namespace gtsam { ValueCloneAllocator() {} }; -#if 0 - /* ************************************************************************* */ - class ValueAutomaticCasting { - Key key_; - const Value& value_; - - public: - ValueAutomaticCasting(Key key, const Value& value) : key_(key), value_(value) {} - - template - class ConvertibleToValue : public ValueType { - }; - - template - operator const ConvertibleToValue& () const { - // Check the type and throw exception if incorrect - if(typeid(ValueType) != typeid(value_)) - throw ValuesIncorrectType(key_, typeid(ValueType), typeid(value_)); - - // We have already checked the type, so do a "blind" static_cast, not dynamic_cast - return static_cast&>(value_); - } - }; -#endif - /* ************************************************************************* */ template const ValueType& Values::at(Key j) const { @@ -83,27 +58,6 @@ namespace gtsam { return static_cast(*item->second); } -#if 0 - /* ************************************************************************* */ - inline ValueAutomaticCasting Values::at(Key j) const { - // Find the item - KeyValueMap::const_iterator item = values_.find(j); - - // Throw exception if it does not exist - if(item == values_.end()) - throw ValuesKeyDoesNotExist("retrieve", j); - - return ValueAutomaticCasting(item->first, *item->second); - } -#endif - -#if 0 - /* ************************************************************************* */ - inline ValueAutomaticCasting Values::operator[](Key j) const { - return at(j); - } -#endif - /* ************************************************************************* */ template boost::optional Values::exists(Key j) const { From 51d38f4b5df774d1c83dce2ebcb003955e0531cb Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 21:33:03 +0000 Subject: [PATCH 48/57] Added Marginals unit test and class --- gtsam/nonlinear/Marginals.cpp | 67 +++++++++++++++++ gtsam/nonlinear/Marginals.h | 52 +++++++++++++ tests/testMarginals.cpp | 136 ++++++++++++++++++++++++++++++++++ 3 files changed, 255 insertions(+) create mode 100644 gtsam/nonlinear/Marginals.cpp create mode 100644 gtsam/nonlinear/Marginals.h create mode 100644 tests/testMarginals.cpp diff --git a/gtsam/nonlinear/Marginals.cpp b/gtsam/nonlinear/Marginals.cpp new file mode 100644 index 000000000..06f9fd6cf --- /dev/null +++ b/gtsam/nonlinear/Marginals.cpp @@ -0,0 +1,67 @@ +/* 
---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Marginals.cpp + * @brief + * @author Richard Roberts + * @date May 14, 2012 + */ + +#include +#include +#include + +namespace gtsam { + +Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization) { + + // Compute COLAMD ordering + ordering_ = *graph.orderingCOLAMD(solution); + + // Linearize graph + graph_ = *graph.linearize(solution, ordering_); + + // Compute BayesTree + factorization_ = factorization; + if(factorization_ == CHOLESKY) + bayesTree_ = *GaussianMultifrontalSolver(graph_, false).eliminate(); + else if(factorization_ == QR) + bayesTree_ = *GaussianMultifrontalSolver(graph_, true).eliminate(); +} + +Matrix Marginals::marginalCovariance(Key variable) const { + // Get linear key + Index index = ordering_[variable]; + + // Compute marginal + GaussianFactor::shared_ptr marginalFactor; + if(factorization_ == CHOLESKY) + marginalFactor = bayesTree_.marginalFactor(index, EliminatePreferCholesky); + else if(factorization_ == QR) + marginalFactor = bayesTree_.marginalFactor(index, EliminateQR); + + // Get information matrix (only store upper-right triangle) + Matrix info; + if(typeid(*marginalFactor) == typeid(JacobianFactor)) { + JacobianFactor::constABlock A = static_cast(*marginalFactor).getA(); + info = A.transpose() * A; // Compute A'A + } else if(typeid(*marginalFactor) == typeid(HessianFactor)) { + const HessianFactor& hessian = static_cast(*marginalFactor); + const size_t dim = hessian.getDim(hessian.begin()); + info = hessian.info().topLeftCorner(dim,dim).selfadjointView(); // Take the non-augmented part of the information matrix + } + + // Compute covariance + return info.inverse(); +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h new file mode 100644 index 000000000..48acc007e --- /dev/null +++ b/gtsam/nonlinear/Marginals.h @@ -0,0 +1,52 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Marginals.h + * @brief A class for computing marginals in a NonlinearFactorGraph + * @author Richard Roberts + * @date May 14, 2012 + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +/** + * A class for computing marginals in a NonlinearFactorGraph + */ +class Marginals { + +public: + + enum Factorization { + CHOLESKY, + QR + }; + + Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY); + + Matrix marginalCovariance(Key variable) const; + +protected: + + GaussianFactorGraph graph_; + Ordering ordering_; + Factorization factorization_; + GaussianBayesTree bayesTree_; + +}; + +} /* namespace gtsam */ diff --git a/tests/testMarginals.cpp b/tests/testMarginals.cpp new file mode 100644 index 000000000..50b0d3793 --- /dev/null +++ b/tests/testMarginals.cpp @@ -0,0 +1,136 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testMarginals.cpp + * @brief + * @author Richard Roberts + * @date May 14, 2012 + */ + +#include + +// for all nonlinear keys +#include + +// for points and poses +#include +#include + +// for modeling measurement uncertainty - all models included here +#include + +// add in headers for specific factors +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +TEST(Marginals, planarSLAMmarginals) { + + // Taken from PlanarSLAMSelfContained_advanced + + // create keys for variables + Symbol x1('x',1), x2('x',2), x3('x',3); + Symbol l1('l',1), l2('l',2); + + // create graph container and add factors to it + NonlinearFactorGraph graph; + + /* add prior */ + // gaussian for prior + SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); + Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin + graph.add(PriorFactor(x1, prior_measurement, prior_model)); // add the factor to the graph + + /* add odometry */ + // general noisemodel for odometry + SharedDiagonal odom_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1)); + Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) + // create between factors to represent odometry + graph.add(BetweenFactor(x1, x2, odom_measurement, odom_model)); + graph.add(BetweenFactor(x2, x3, odom_measurement, odom_model)); + + /* add measurements */ + // general noisemodel for measurements + SharedDiagonal meas_model = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.2)); + + // create the measurement values - indices are (pose id, landmark id) + Rot2 bearing11 = Rot2::fromDegrees(45), + bearing21 = Rot2::fromDegrees(90), + bearing32 = Rot2::fromDegrees(90); + double range11 = sqrt(4+4), + range21 = 2.0, + range32 = 2.0; + + // create bearing/range factors + graph.add(BearingRangeFactor(x1, l1, bearing11, range11, meas_model)); + graph.add(BearingRangeFactor(x2, l1, bearing21, range21, meas_model)); + graph.add(BearingRangeFactor(x3, l2, bearing32, range32, meas_model)); + + // linearization point for 
marginals + Values soln; + soln.insert(x1, Pose2(0.0, 0.0, 0.0)); + soln.insert(x2, Pose2(2.0, 0.0, 0.0)); + soln.insert(x3, Pose2(4.0, 0.0, 0.0)); + soln.insert(l1, Point2(2.0, 2.0)); + soln.insert(l2, Point2(4.0, 2.0)); + + Matrix expectedx1(3,3); + expectedx1 << + 0.09, -7.1942452e-18, -1.27897692e-17, + -7.1942452e-18, 0.09, 1.27897692e-17, + -1.27897692e-17, 1.27897692e-17, 0.01; + Matrix expectedx2(3,3); + expectedx2 << + 0.120967742, -0.00129032258, 0.00451612903, + -0.00129032258, 0.158387097, 0.0206451613, + 0.00451612903, 0.0206451613, 0.0177419355; + Matrix expectedx3(3,3); + expectedx3 << + 0.160967742, 0.00774193548, 0.00451612903, + 0.00774193548, 0.351935484, 0.0561290323, + 0.00451612903, 0.0561290323, 0.0277419355; + Matrix expectedl1(2,2); + expectedl1 << + 0.168709677, -0.0477419355, + -0.0477419355, 0.163548387; + Matrix expectedl2(2,2); + expectedl2 << + 0.293870968, -0.104516129, + -0.104516129, 0.391935484; + + // Check marginals covariances for all variables (QR mode) + Marginals marginals(graph, soln, Marginals::QR); + EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8)); + EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8)); + EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8)); + EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8)); + EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8)); + + // Check marginals covariances for all variables (Cholesky mode) + marginals = Marginals(graph, soln, Marginals::CHOLESKY); + EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8)); + EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8)); + EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8)); + EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8)); + EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8)); +} + + + +/* ************************************************************************* */ +int main() { TestResult tr; return TestRegistry::runAllTests(tr);} +/* ************************************************************************* */ From 93d1defc0771fda54a134ae4dce109e63077838b Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Mon, 14 May 2012 21:33:05 +0000 Subject: [PATCH 49/57] Updated Marginals doxygen --- gtsam/nonlinear/Marginals.cpp | 14 ++++++++------ gtsam/nonlinear/Marginals.h | 14 +++++++++++++- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/gtsam/nonlinear/Marginals.cpp b/gtsam/nonlinear/Marginals.cpp index 06f9fd6cf..070fa89c9 100644 --- a/gtsam/nonlinear/Marginals.cpp +++ b/gtsam/nonlinear/Marginals.cpp @@ -39,6 +39,10 @@ Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, } Matrix Marginals::marginalCovariance(Key variable) const { + return marginalInformation(variable).inverse(); +} + +Matrix Marginals::marginalInformation(Key variable) const { // Get linear key Index index = ordering_[variable]; @@ -50,18 +54,16 @@ Matrix Marginals::marginalCovariance(Key variable) const { marginalFactor = bayesTree_.marginalFactor(index, EliminateQR); // Get information matrix (only store upper-right triangle) - Matrix info; if(typeid(*marginalFactor) == typeid(JacobianFactor)) { JacobianFactor::constABlock A = static_cast(*marginalFactor).getA(); - info = A.transpose() * A; // Compute A'A + return A.transpose() * A; // Compute A'A } else if(typeid(*marginalFactor) == typeid(HessianFactor)) { const HessianFactor& hessian = 
static_cast(*marginalFactor); const size_t dim = hessian.getDim(hessian.begin()); - info = hessian.info().topLeftCorner(dim,dim).selfadjointView(); // Take the non-augmented part of the information matrix + return hessian.info().topLeftCorner(dim,dim).selfadjointView(); // Take the non-augmented part of the information matrix + } else { + throw runtime_error("Internal error: Marginals::marginalInformation expected either a JacobianFactor or HessianFactor"); } - - // Compute covariance - return info.inverse(); } } /* namespace gtsam */ diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h index 48acc007e..dde199ca0 100644 --- a/gtsam/nonlinear/Marginals.h +++ b/gtsam/nonlinear/Marginals.h @@ -25,21 +25,33 @@ namespace gtsam { /** - * A class for computing marginals in a NonlinearFactorGraph + * A class for computing Gaussian marginals of variables in a NonlinearFactorGraph */ class Marginals { public: + /** The linear factorization mode - either CHOLESKY (faster and suitable for most problems) or QR (slower but more numerically stable for poorly-conditioned problems). */ enum Factorization { CHOLESKY, QR }; + /** Construct a marginals class. + * @param graph The factor graph defining the full joint density on all variables. + * @param solution The linearization point about which to compute Gaussian marginals (usually the MLE as obtained from a NonlinearOptimizer). + * @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems). + */ Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY); + /** Compute the marginal covariance of a single variable */ Matrix marginalCovariance(Key variable) const; + /** Compute the marginal information matrix of a single variable. You can + * use LLt(const Matrix&) or RtR(const Matrix&) to obtain the square-root information + * matrix. 
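+   * For example, a usage sketch with the RtR helper referenced above (assuming a
+   * Marginals object `marginals` and a Key `key` already exist):
+   *   Matrix R = RtR(marginals.marginalInformation(key));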
*/ + Matrix marginalInformation(Key variable) const; + protected: GaussianFactorGraph graph_; From 754e289737ad398a1b71084e1eecdf92c0b95bb2 Mon Sep 17 00:00:00 2001 From: Stephen Williams Date: Mon, 14 May 2012 22:31:42 +0000 Subject: [PATCH 50/57] Fixed errors in unit tests from updated NonlinearOptimizers --- gtsam/nonlinear/DoglegOptimizer.h | 4 ++-- gtsam/nonlinear/GaussNewtonOptimizer.h | 2 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 4 ++-- tests/testNonlinearOptimizer.cpp | 23 +++++++++++++++---- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 44813befa..69d0503d7 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -65,7 +65,7 @@ public: virtual ~DoglegState() {} protected: - DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int interations = 0) : + DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int iterations = 0) : NonlinearOptimizerState(graph, values, iterations), Delta(params.deltaInitial) {} friend class DoglegOptimizer; @@ -105,7 +105,7 @@ public: */ DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : NonlinearOptimizer(graph) { - *params_.ordering = ordering; + params_.ordering = ordering; state_ = DoglegState(graph, initialValues, params_); } /// @} diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index f7c57c3e9..39b64b667 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -70,7 +70,7 @@ public: */ GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : NonlinearOptimizer(graph), state_(graph, initialValues) { - *params_.ordering = ordering; } + params_.ordering = ordering; } /// @} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index f2e2e4fc7..a03140166 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -74,7 +74,7 @@ public: protected: LevenbergMarquardtState(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params, unsigned int iterations = 0) : - NonlinearOptimizerState(graph, values, iterations), lambda(params.lambdaInitial) {} + NonlinearOptimizerState(graph, initialValues, iterations), lambda(params.lambdaInitial) {} friend class LevenbergMarquardtOptimizer; }; @@ -114,7 +114,7 @@ public: */ LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) : NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) { - *params_.ordering = ordering; + params_.ordering = ordering; state_ = LevenbergMarquardtState(graph, initialValues, params_); } /** Access the current damping value */ diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index e1088683e..b8d80d645 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -54,11 +54,14 @@ TEST( NonlinearOptimizer, iterateLM ) config.insert(simulated2D::PoseKey(1), x0); // normal iterate - GaussNewtonOptimizer gnOptimizer(fg, config); + GaussNewtonParams gnParams; + GaussNewtonOptimizer gnOptimizer(fg, config, gnParams); gnOptimizer.iterate(); // LM iterate with lambda 0 should be the 
same - LevenbergMarquardtOptimizer lmOptimizer(fg, config); + LevenbergMarquardtParams lmParams; + lmParams.lambdaInitial = 0.0; + LevenbergMarquardtOptimizer lmOptimizer(fg, config, lmParams); lmOptimizer.iterate(); CHECK(assert_equal(gnOptimizer.values(), lmOptimizer.values(), 1e-9)); @@ -81,16 +84,26 @@ TEST( NonlinearOptimizer, optimize ) c0.insert(simulated2D::PoseKey(1), x0); DOUBLES_EQUAL(199.0,fg.error(c0),1e-3); + // optimize parameters + Ordering ord; + ord.push_back(kx(1)); + // Gauss-Newton - Values actual1 = GaussNewtonOptimizer(fg, c0).optimize(); + GaussNewtonParams gnParams; + gnParams.ordering = ord; + Values actual1 = GaussNewtonOptimizer(fg, c0, gnParams).optimize(); DOUBLES_EQUAL(0,fg.error(actual1),tol); // Levenberg-Marquardt - Values actual2 = LevenbergMarquardtOptimizer(fg, c0).optimize(); + LevenbergMarquardtParams lmParams; + lmParams.ordering = ord; + Values actual2 = LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize(); DOUBLES_EQUAL(0,fg.error(actual2),tol); // Dogleg - Values actual3 = DoglegOptimizer(fg, c0).optimize(); + DoglegParams dlParams; + dlParams.ordering = ord; + Values actual3 = DoglegOptimizer(fg, c0, dlParams).optimize(); DOUBLES_EQUAL(0,fg.error(actual3),tol); } From bd76692794cc79c33379a58b713b97a3765e0aef Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 00:01:34 +0000 Subject: [PATCH 51/57] Removed inactive code --- gtsam/nonlinear/Values.h | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index b80348c01..4af8e5a3e 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -302,22 +302,6 @@ namespace gtsam { */ const Value& at(Key j) const; -#if 0 - /** Retrieve a variable by key \c j. This non-templated version returns a - * special ValueAutomaticCasting object that may be assigned to the proper - * value. - * @param j Retrieve the value associated with this key - * @return A ValueAutomaticCasting object that may be assignmed to a Value - * of the proper type. - */ - ValueAutomaticCasting at(Key j) const; -#endif - -#if 0 - /** operator[] syntax for at(Key j) */ - ValueAutomaticCasting operator[](Key j) const; -#endif - /** Check if a value exists with key \c j. See exists<>(Key j) * and exists(const TypedKey& j) for versions that return the value if it * exists. 
*/ From 516e1610a1a001ded548bc38acb5e48d29ebecf9 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 00:01:38 +0000 Subject: [PATCH 52/57] Added joint marginals and unit tests --- .cproject | 142 ++++++++-------------------------- gtsam/nonlinear/Marginals.cpp | 74 ++++++++++++++++++ gtsam/nonlinear/Marginals.h | 54 ++++++++++++- tests/testMarginals.cpp | 59 +++++++++++++- 4 files changed, 215 insertions(+), 114 deletions(-) diff --git a/.cproject b/.cproject index c81094465..e65c713f6 100644 --- a/.cproject +++ b/.cproject @@ -844,14 +844,30 @@ true true - + make - -j5 + -j2 + install + true + true + true + + + make + -j2 check true true true + + make + -j2 + clean + true + true + true + make -j2 @@ -1028,6 +1044,14 @@ true true + + make + -j2 + SimpleRotation.run + true + true + true + make -j2 @@ -1635,14 +1659,6 @@ true true - - make - -j5 - testVector.run - true - true - true - make -j2 @@ -1803,14 +1819,6 @@ true true - - make - -j5 - UGM_small.run - true - true - true - make -j2 @@ -2122,110 +2130,26 @@ true true - + make - -j5 - wrap_gtsam + -j8 + testMarginals.run true true true - - make - VERBOSE=1 - wrap_gtsam - true - false - true - - - cpack - -G DEB - true - false - true - - - cpack - -G RPM - true - false - true - - - cpack - -G TGZ - true - false - true - - - cpack - --config CPackSourceConfig.cmake - true - false - true - - + make -j5 - check.discrete + wrap.testSpirit.run true true true - + make -j5 - wrap_gtsam_unstable - true - true - true - - - make - -j5 - check.wrap - true - true - true - - - make - -j5 - check.dynamics_unstable - true - true - true - - - make - -j5 - check.slam_unstable - true - true - true - - - make - -j5 - check.base_unstable - true - true - true - - - make - -j5 - testSpirit.run - true - true - true - - - make - -j5 - testWrap.run + wrap.testWrap.run true true true diff --git a/gtsam/nonlinear/Marginals.cpp b/gtsam/nonlinear/Marginals.cpp index 070fa89c9..34d620bc2 100644 --- a/gtsam/nonlinear/Marginals.cpp +++ b/gtsam/nonlinear/Marginals.cpp @@ -17,11 +17,13 @@ */ #include +#include #include #include namespace gtsam { +/* ************************************************************************* */ Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization) { // Compute COLAMD ordering @@ -30,6 +32,9 @@ Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, // Linearize graph graph_ = *graph.linearize(solution, ordering_); + // Store values + values_ = solution; + // Compute BayesTree factorization_ = factorization; if(factorization_ == CHOLESKY) @@ -38,10 +43,12 @@ Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, bayesTree_ = *GaussianMultifrontalSolver(graph_, true).eliminate(); } +/* ************************************************************************* */ Matrix Marginals::marginalCovariance(Key variable) const { return marginalInformation(variable).inverse(); } +/* ************************************************************************* */ Matrix Marginals::marginalInformation(Key variable) const { // Get linear key Index index = ordering_[variable]; @@ -66,4 +73,71 @@ Matrix Marginals::marginalInformation(Key variable) const { } } +/* ************************************************************************* */ +JointMarginal Marginals::jointMarginalCovariance(const std::vector& variables) const { + JointMarginal info = jointMarginalInformation(variables); + info.fullMatrix_ = info.fullMatrix_.inverse(); + return 
info; +} + +/* ************************************************************************* */ +JointMarginal Marginals::jointMarginalInformation(const std::vector& variables) const { + + // If 2 variables, we can use the BayesTree::joint function, otherwise we + // have to use sequential elimination. + if(variables.size() == 1) { + Matrix info = marginalInformation(variables.front()); + std::vector dims; + dims.push_back(info.rows()); + Ordering indices; + indices.insert(variables.front(), 0); + return JointMarginal(info, dims, indices); + + } else { + // Convert keys to linear indices + vector indices(variables.size()); + for(size_t i=0; i usedIndices; + for(size_t i=0; i Index_Key; + BOOST_FOREACH(const Index_Key& index_key, usedIndices) { + variableConversion.insert(index_key.second, slot); + ++ slot; + } + } + + // Get dimensions from factor graph + std::vector dims(indices.size(), 0); + for(size_t i = 0; i < variables.size(); ++i) + dims[i] = values_.at(variables[i]).dim(); + + // Get information matrix + Matrix augmentedInfo = jointFG.denseHessian(); + Matrix info = augmentedInfo.topLeftCorner(augmentedInfo.rows()-1, augmentedInfo.cols()-1); + + return JointMarginal(info, dims, variableConversion); + } +} + } /* namespace gtsam */ diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h index dde199ca0..77bccdfd2 100644 --- a/gtsam/nonlinear/Marginals.h +++ b/gtsam/nonlinear/Marginals.h @@ -18,12 +18,15 @@ #pragma once -#include +#include #include +#include #include namespace gtsam { +class JointMarginal; + /** * A class for computing Gaussian marginals of variables in a NonlinearFactorGraph */ @@ -52,13 +55,62 @@ public: * matrix. */ Matrix marginalInformation(Key variable) const; + /** Compute the joint marginal covariance of several variables */ + JointMarginal jointMarginalCovariance(const std::vector& variables) const; + + /** Compute the joint marginal information of several variables */ + JointMarginal jointMarginalInformation(const std::vector& variables) const; + protected: GaussianFactorGraph graph_; + Values values_; Ordering ordering_; Factorization factorization_; GaussianBayesTree bayesTree_; +}; +/** + * A class to store and access a joint marginal, returned from Marginals::jointMarginalCovariance and Marginals::jointMarginalInformation + */ +class JointMarginal { + +protected: + typedef SymmetricBlockView BlockView; + +public: + /** A block view of the joint marginal - this stores a reference to the + * JointMarginal object, so the JointMarginal object must be kept in scope + * while this block view is needed, otherwise assign this block object to a + * Matrix to store it. + */ + typedef BlockView::constBlock Block; + + /** Access a block, corresponding to a pair of variables, of the joint + * marginal. Each block is accessed by its "vertical position", + * corresponding to the variable with nonlinear Key \c iVariable and + * "horizontal position", corresponding to the variable with nonlinear Key + * \c jVariable. + * + * For example, if we have the joint marginal on a 2D pose "x3" and a 2D + * landmark "l2", then jointMarginal(Symbol('x',3), Symbol('l',2)) will + * return the 3x2 block of the joint covariance matrix corresponding to x3 + * and l2. 
+ * @param iVariable The nonlinear Key specifying the "vertical position" of the requested block + * @param jVariable The nonlinear Key specifying the "horizontal position" of the requested block + */ + Block operator()(Key iVariable, Key jVariable) const { + return blockView_(indices_[iVariable], indices_[jVariable]); } + +protected: + Matrix fullMatrix_; + BlockView blockView_; + Ordering indices_; + + JointMarginal(const Matrix& fullMatrix, const std::vector& dims, const Ordering& indices) : + fullMatrix_(fullMatrix), blockView_(fullMatrix_, dims.begin(), dims.end()), indices_(indices) {} + + friend class Marginals; }; } /* namespace gtsam */ diff --git a/tests/testMarginals.cpp b/tests/testMarginals.cpp index 50b0d3793..a91e6b576 100644 --- a/tests/testMarginals.cpp +++ b/tests/testMarginals.cpp @@ -112,21 +112,72 @@ TEST(Marginals, planarSLAMmarginals) { 0.293870968, -0.104516129, -0.104516129, 0.391935484; - // Check marginals covariances for all variables (QR mode) - Marginals marginals(graph, soln, Marginals::QR); + // Check marginals covariances for all variables (Cholesky mode) + Marginals marginals(graph, soln, Marginals::CHOLESKY); EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8)); EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8)); EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8)); EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8)); EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8)); - // Check marginals covariances for all variables (Cholesky mode) - marginals = Marginals(graph, soln, Marginals::CHOLESKY); + // Check marginals covariances for all variables (QR mode) + marginals = Marginals(graph, soln, Marginals::QR); EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8)); EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8)); EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8)); EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8)); EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8)); + + // Check joint marginals for 3 variables + Matrix expected_l2x1x3(8,8); + expected_l2x1x3 << + 0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, 0.151935669757191, -0.104516127560770, -0.050967744878460, + -0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193, + 0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.090000180000270, 0.000000000000000, 0.000000000000000, + -0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, -0.000000000000000, 0.090000180000270, 0.000000000000000, + -0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.000000000000000, 0.040000000000000, 0.010000000000000, + 0.151935669757191, 0.007741936219615, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.160967924878730, 0.007741936219615, 0.004516127560770, + -0.104516127560770, 0.351935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193, + -0.050967744878460, 0.056129031890193, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.004516127560770, 0.056129031890193, 0.027741936219615; + vector variables(3); + variables[0] = l2; + variables[1] = x1; + 
variables[2] = x3; + JointMarginal joint_l2x1x3 = marginals.jointMarginalCovariance(variables); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,0,2,2)), Matrix(joint_l2x1x3(l2,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,0,3,2)), Matrix(joint_l2x1x3(x1,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,0,3,2)), Matrix(joint_l2x1x3(x3,l2)), 1e-6)); + + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,2,2,3)), Matrix(joint_l2x1x3(l2,x1)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,2,3,3)), Matrix(joint_l2x1x3(x1,x1)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,2,3,3)), Matrix(joint_l2x1x3(x3,x1)), 1e-6)); + + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,5,2,3)), Matrix(joint_l2x1x3(l2,x3)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,5,3,3)), Matrix(joint_l2x1x3(x1,x3)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,5,3,3)), Matrix(joint_l2x1x3(x3,x3)), 1e-6)); + + // Check joint marginals for 2 variables (different code path than >2 variable case above) + Matrix expected_l2x1(5,5); + expected_l2x1 << + 0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, + -0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, + 0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, + -0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, + -0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000; + variables.resize(2); + variables[0] = l2; + variables[1] = x1; + JointMarginal joint_l2x1 = marginals.jointMarginalCovariance(variables); + EXPECT(assert_equal(Matrix(expected_l2x1.block(0,0,2,2)), Matrix(joint_l2x1(l2,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1.block(2,0,3,2)), Matrix(joint_l2x1(x1,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1.block(0,2,2,3)), Matrix(joint_l2x1(l2,x1)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1.block(2,2,3,3)), Matrix(joint_l2x1(x1,x1)), 1e-6)); + + // Check joint marginal for 1 variable (different code path than >1 variable cases above) + variables.resize(1); + variables[0] = x1; + JointMarginal joint_x1 = marginals.jointMarginalCovariance(variables); + EXPECT(assert_equal(expectedx1, Matrix(joint_l2x1(x1,x1)), 1e-6)); } From ff3edc6823a4ab8954ef660e993caf5fe123f552 Mon Sep 17 00:00:00 2001 From: Stephen Williams Date: Tue, 15 May 2012 00:15:11 +0000 Subject: [PATCH 53/57] Updated examples to use the new Marginals interface --- examples/PlanarSLAMSelfContained_advanced.cpp | 17 +++++++---------- examples/Pose2SLAMExample_advanced.cpp | 14 ++++++++------ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/examples/PlanarSLAMSelfContained_advanced.cpp b/examples/PlanarSLAMSelfContained_advanced.cpp index a7a45b726..bd5c5295a 100644 --- a/examples/PlanarSLAMSelfContained_advanced.cpp +++ b/examples/PlanarSLAMSelfContained_advanced.cpp @@ -36,8 +36,7 @@ // implementations for structures - needed if self-contained, and these should be included last #include #include -#include -#include +#include using namespace std; using namespace gtsam; @@ -125,15 +124,13 @@ int main(int argc, char** argv) { Values resultMultifrontal = optimizer.optimize(); resultMultifrontal.print("final result (solved with a multifrontal solver)"); - const Ordering& ordering = *optimizer.params().ordering; - GaussianMultifrontalSolver 
linearSolver(*graph.linearize(resultMultifrontal, ordering)); - // Print marginals covariances for all variables - print(linearSolver.marginalCovariance(ordering[x1]), "x1 covariance"); - print(linearSolver.marginalCovariance(ordering[x2]), "x2 covariance"); - print(linearSolver.marginalCovariance(ordering[x3]), "x3 covariance"); - print(linearSolver.marginalCovariance(ordering[l1]), "l1 covariance"); - print(linearSolver.marginalCovariance(ordering[l2]), "l2 covariance"); + Marginals marginals(graph, resultMultifrontal, Marginals::CHOLESKY); + print(marginals.marginalCovariance(x1), "x1 covariance"); + print(marginals.marginalCovariance(x2), "x2 covariance"); + print(marginals.marginalCovariance(x3), "x3 covariance"); + print(marginals.marginalCovariance(l1), "l1 covariance"); + print(marginals.marginalCovariance(l2), "l2 covariance"); return 0; } diff --git a/examples/Pose2SLAMExample_advanced.cpp b/examples/Pose2SLAMExample_advanced.cpp index 282287827..29d8c58cf 100644 --- a/examples/Pose2SLAMExample_advanced.cpp +++ b/examples/Pose2SLAMExample_advanced.cpp @@ -23,6 +23,7 @@ // pull in the Pose2 SLAM domain with all typedefs and helper functions defined #include #include +#include #include #include @@ -74,12 +75,13 @@ int main(int argc, char** argv) { pose2SLAM::Values result = optimizer.optimize(); result.print("final result"); -// /* Get covariances */ -// Matrix covariance1 = optimizer_result.marginalCovariance(PoseKey(1)); -// Matrix covariance2 = optimizer_result.marginalCovariance(PoseKey(2)); -// -// print(covariance1, "Covariance1"); -// print(covariance2, "Covariance2"); + /* Get covariances */ + Marginals marginals(graph, result, Marginals::CHOLESKY); + Matrix covariance1 = marginals.marginalCovariance(PoseKey(1)); + Matrix covariance2 = marginals.marginalCovariance(PoseKey(2)); + + print(covariance1, "Covariance1"); + print(covariance2, "Covariance2"); return 0; } From 9e0996296affccdedc08e03efb79fc66b1e942d0 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 05:08:57 +0000 Subject: [PATCH 54/57] Added Cholesky/LDL switch in NonlinearOptimizer, preparing to remove LDL, remove dependency of NonlinearOptimizer on linear solvers. 
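The intended call pattern after this change is parameter-driven rather than solver-object-driven. A minimal sketch (`graph` and `initialEstimate` are placeholder names for an existing NonlinearFactorGraph and Values; the remaining identifiers are the ones introduced or already present in this series):

    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
    using namespace gtsam;

    // Elimination and factorization are now chosen on the params struct; the
    // optimizer maps `factorization` to an elimination function internally via
    // SuccessiveLinearizationParams::getEliminationFunction().
    LevenbergMarquardtParams params;
    params.elimination   = LevenbergMarquardtParams::MULTIFRONTAL;  // or SEQUENTIAL
    params.factorization = LevenbergMarquardtParams::CHOLESKY;      // or LDL, QR
    Values result = LevenbergMarquardtOptimizer(graph, initialEstimate, params).optimize();

The same two fields drive GaussNewtonOptimizer and DoglegOptimizer, whose iterate() implementations below switch on params_.elimination and call params_.getEliminationFunction() in the same way.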
--- gtsam/nonlinear/DoglegOptimizer.cpp | 21 +++++------- gtsam/nonlinear/GaussNewtonOptimizer.cpp | 34 ++++++++----------- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 30 +++++++--------- .../SuccessiveLinearizationOptimizer.h | 16 ++++++++- tests/testNonlinearOptimizer.cpp | 5 +++ 5 files changed, 54 insertions(+), 52 deletions(-) diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 4eeac0296..374ef0c6b 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -18,8 +18,8 @@ #include -#include -#include +#include +#include #include using namespace std; @@ -33,14 +33,8 @@ void DoglegOptimizer::iterate(void) { const Ordering& ordering = *params_.ordering; GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering); - // Check whether to use QR - bool useQR; - if(params_.factorization == DoglegParams::LDL) - useQR = false; - else if(params_.factorization == DoglegParams::QR) - useQR = true; - else - throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization"); + // Get elimination method + GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction(); // Pull out parameters we'll use const bool dlVerbose = (params_.dlVerbosity > DoglegParams::SILENT); @@ -49,11 +43,12 @@ void DoglegOptimizer::iterate(void) { DoglegOptimizerImpl::IterationResult result; if(params_.elimination == DoglegParams::MULTIFRONTAL) { - GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate(); - result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose); + GaussianBayesTree bt; + bt.insert(GaussianJunctionTree(*linear).eliminate(eliminationMethod)); + result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, bt, graph_, state_.values, ordering, state_.error, dlVerbose); } else if(params_.elimination == DoglegParams::SEQUENTIAL) { - GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate(); + GaussianBayesNet::shared_ptr bn = EliminationTree::Create(*linear)->eliminate(eliminationMethod); result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose); } else { diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp index 39a4d32d5..8e2b20859 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -16,9 +16,9 @@ * @created Feb 26, 2012 */ +#include +#include #include -#include -#include using namespace std; @@ -32,29 +32,23 @@ void GaussNewtonOptimizer::iterate() { // Linearize graph GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering); - // Check whether to use QR - bool useQR; - if(params_.factorization == GaussNewtonParams::LDL) - useQR = false; - else if(params_.factorization == GaussNewtonParams::QR) - useQR = true; - else - throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::factorization"); - // Optimize - VectorValues::shared_ptr delta; - if(params_.elimination == GaussNewtonParams::MULTIFRONTAL) - delta = GaussianMultifrontalSolver(*linear, useQR).optimize(); - else if(params_.elimination == GaussNewtonParams::SEQUENTIAL) - delta = GaussianSequentialSolver(*linear, useQR).optimize(); - else - throw 
runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination"); + VectorValues delta; + { + GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction(); + if(params_.elimination == GaussNewtonParams::MULTIFRONTAL) + delta = GaussianJunctionTree(*linear).optimize(eliminationMethod); + else if(params_.elimination == GaussNewtonParams::SEQUENTIAL) + delta = gtsam::optimize(*EliminationTree::Create(*linear)->eliminate(eliminationMethod)); + else + throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination"); + } // Maybe show output - if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta"); + if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta.print("delta"); // Create new state with new values and new error - state_.values = current.values.retract(*delta, *params_.ordering); + state_.values = current.values.retract(delta, *params_.ordering); state_.error = graph_.error(state_.values); ++ state_.iterations; } diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 50d87a0d3..2b9865ada 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -19,8 +19,8 @@ #include #include // For NegativeMatrixException -#include -#include +#include +#include using namespace std; @@ -32,14 +32,8 @@ void LevenbergMarquardtOptimizer::iterate() { // Linearize graph GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering); - // Check whether to use QR - bool useQR; - if(params_.factorization == LevenbergMarquardtParams::LDL) - useQR = false; - else if(params_.factorization == LevenbergMarquardtParams::QR) - useQR = true; - else - throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::factorization"); + // Get elimination method + GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction(); // Pull out parameters we'll use const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity; @@ -72,19 +66,19 @@ void LevenbergMarquardtOptimizer::iterate() { try { // Optimize - VectorValues::shared_ptr delta; - if(params_.elimination == LevenbergMarquardtParams::MULTIFRONTAL) - delta = GaussianMultifrontalSolver(dampedSystem, useQR).optimize(); - else if(params_.elimination == LevenbergMarquardtParams::SEQUENTIAL) - delta = GaussianSequentialSolver(dampedSystem, useQR).optimize(); + VectorValues delta; + if(params_.elimination == SuccessiveLinearizationParams::MULTIFRONTAL) + delta = GaussianJunctionTree(*linear).optimize(eliminationMethod); + else if(params_.elimination == SuccessiveLinearizationParams::SEQUENTIAL) + delta = gtsam::optimize(*EliminationTree::Create(*linear)->eliminate(eliminationMethod)); else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); - if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta->vector().norm() << endl; - if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta->print("delta"); + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl; + if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta"); // update values - Values newValues = state_.values.retract(*delta, *params_.ordering); + Values newValues = state_.values.retract(delta, *params_.ordering); // create new optimization state with more 
adventurous lambda double error = graph_.error(newValues); diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h index e95502b28..8f02059fc 100644 --- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h +++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h @@ -32,6 +32,7 @@ public: /** See SuccessiveLinearizationParams::factorization */ enum Factorization { + CHOLESKY, LDL, QR, }; @@ -54,7 +55,9 @@ public: else std::cout << " elimination method: (invalid)\n"; - if(factorization == LDL) + if(factorization == CHOLESKY) + std::cout << " factorization method: CHOLESKY\n"; + else if(factorization == LDL) std::cout << " factorization method: LDL\n"; else if(factorization == QR) std::cout << " factorization method: QR\n"; @@ -68,6 +71,17 @@ public: std::cout.flush(); } + + GaussianFactorGraph::Eliminate getEliminationFunction() const { + if(factorization == SuccessiveLinearizationParams::CHOLESKY) + return EliminatePreferCholesky; + else if(factorization == SuccessiveLinearizationParams::LDL) + return EliminatePreferLDL; + else if(factorization == SuccessiveLinearizationParams::QR) + return EliminateQR; + else + throw runtime_error("Nonlinear optimization parameter \"factorization\" is invalid"); + } }; } /* namespace gtsam */ diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index b8d80d645..7173253b8 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -153,6 +153,8 @@ TEST( NonlinearOptimizer, optimization_method ) paramsQR.factorization = LevenbergMarquardtParams::QR; LevenbergMarquardtParams paramsLDL; paramsLDL.factorization = LevenbergMarquardtParams::LDL; + LevenbergMarquardtParams paramsChol; + paramsLDL.factorization = LevenbergMarquardtParams::CHOLESKY; example::Graph fg = example::createReallyNonlinearFactorGraph(); @@ -165,6 +167,9 @@ TEST( NonlinearOptimizer, optimization_method ) Values actualMFLDL = LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize(); DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol); + + Values actualMFChol = LevenbergMarquardtOptimizer(fg, c0, paramsChol).optimize(); + DOUBLES_EQUAL(0,fg.error(actualMFChol),tol); } /* ************************************************************************* */ From a35a1322dd94a7d666eceb4101a65d6426cec2be Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 05:09:05 +0000 Subject: [PATCH 55/57] Distribution tarball does not contain gtsam_unstable and cmake options related to gtsam_unstable are not available/visible in CMake in tarball builds. --- CMakeLists.txt | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 45f907e14..22713c6a4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -19,6 +19,13 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR}) message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. 
") endif() +# See whether gtsam_unstable is available (it will be present only if we're using an SVN checkout) +if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") + set(GTSAM_UNSTABLE_AVAILABLE 1) +else() + set(GTSAM_UNSTABLE_AVAILABLE 0) +endif() + # Load build type flags and default to Debug mode include(GtsamBuildTypes) @@ -41,7 +48,9 @@ endif() option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON) option(GTSAM_BUILD_TIMING "Enable/Disable building of timing scripts" ON) option(GTSAM_BUILD_EXAMPLES "Enable/Disable building of examples" ON) -option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" OFF) +if(GTSAM_UNSTABLE_AVAILABLE) + option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" OFF) +endif() option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab wrap utility (necessary for matlab interface)" ON) option(GTSAM_BUILD_SHARED_LIBRARY "Enable/Disable building of a shared version of gtsam" ON) option(GTSAM_BUILD_STATIC_LIBRARY "Enable/Disable building of a static version of gtsam" ON) @@ -130,7 +139,7 @@ set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory -set(CPACK_SOURCE_IGNORE_FILES "/build;/\\\\.;/makedoc.sh$") +set(CPACK_SOURCE_IGNORE_FILES "/build;/\\\\.;/makedoc.sh$;/gtsam_unstable/") set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") #set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs @@ -149,7 +158,9 @@ print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap print_config_flag(${GTSAM_BUILD_SHARED_LIBRARY} "Build shared GTSAM Library ") print_config_flag(${GTSAM_BUILD_STATIC_LIBRARY} "Build static GTSAM Library ") print_config_flag(${GTSAM_BUILD_CONVENIENCE_LIBRARIES} "Build Convenience Libraries ") -print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") +if(GTSAM_UNSTABLE_AVAILABLE) + print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") +endif() print_config_flag(${GTSAM_ENABLE_INSTALL_TEST_FIX} "Tests excluded from all target ") string(TOUPPER "${CMAKE_BUILD_TYPE}" cmake_build_type_toupper) message(STATUS " Build type : ${CMAKE_BUILD_TYPE}") From 68467448a74189d250fbf9803906ee3b9bf13ec2 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 13:23:43 +0000 Subject: [PATCH 56/57] Fixed small LM bug (solving original instead of damped system) and added unit test --- .cproject | 335 +++++++++--------- .../nonlinear/LevenbergMarquardtOptimizer.cpp | 4 +- tests/testNonlinearOptimizer.cpp | 23 ++ 3 files changed, 195 insertions(+), 167 deletions(-) diff --git a/.cproject b/.cproject index e65c713f6..0f9a11611 100644 --- a/.cproject +++ b/.cproject @@ -362,14 +362,6 @@ true true - - make - -j2 - testGaussianFactor.run - true - true - true - make -j2 @@ -396,6 +388,7 @@ make + tests/testBayesTree.run true false @@ -403,6 +396,7 @@ make + testBinaryBayesNet.run true false @@ -450,6 +444,7 @@ make + testSymbolicBayesNet.run true false @@ -457,6 +452,7 @@ make + tests/testSymbolicFactor.run true false @@ -464,6 +460,7 @@ make + testSymbolicFactorGraph.run true false @@ -479,11 +476,20 @@ make + tests/testBayesTree true false true + + make + -j2 + testGaussianFactor.run + true + true + true + make -j2 @@ -510,7 +516,6 @@ make - 
testGraph.run true false @@ -582,7 +587,6 @@ make - testInference.run true false @@ -590,7 +594,6 @@ make - testGaussianFactor.run true false @@ -598,7 +601,6 @@ make - testJunctionTree.run true false @@ -606,7 +608,6 @@ make - testSymbolicBayesNet.run true false @@ -614,7 +615,6 @@ make - testSymbolicFactorGraph.run true false @@ -684,22 +684,6 @@ false true - - make - -j2 - all - true - true - true - - - make - -j2 - clean - true - true - true - make -j2 @@ -716,6 +700,22 @@ true true + + make + -j2 + all + true + true + true + + + make + -j2 + clean + true + true + true + make -j2 @@ -740,18 +740,26 @@ true true - + make - -j5 - nonlinear.testValues.run + -j2 + all true true true - + make - -j5 - nonlinear.testOrdering.run + -j2 + check + true + true + true + + + make + -j2 + clean true true true @@ -788,26 +796,18 @@ true true - + make - -j2 - all + -j5 + nonlinear.testValues.run true true true - + make - -j2 - check - true - true - true - - - make - -j2 - clean + -j5 + nonlinear.testOrdering.run true true true @@ -1142,6 +1142,7 @@ make + testErrors.run true false @@ -1597,7 +1598,6 @@ make - testSimulated2DOriented.run true false @@ -1637,7 +1637,6 @@ make - testSimulated2D.run true false @@ -1645,7 +1644,6 @@ make - testSimulated3D.run true false @@ -1829,7 +1827,6 @@ make - tests/testGaussianISAM2 true false @@ -1851,102 +1848,6 @@ true true - - make - -j2 - testRot3.run - true - true - true - - - make - -j2 - testRot2.run - true - true - true - - - make - -j2 - testPose3.run - true - true - true - - - make - -j2 - timeRot3.run - true - true - true - - - make - -j2 - testPose2.run - true - true - true - - - make - -j2 - testCal3_S2.run - true - true - true - - - make - -j2 - testSimpleCamera.run - true - true - true - - - make - -j2 - testHomography2.run - true - true - true - - - make - -j2 - testCalibratedCamera.run - true - true - true - - - make - -j2 - check - true - true - true - - - make - -j2 - clean - true - true - true - - - make - -j2 - testPoint2.run - true - true - true - make -j2 @@ -2138,42 +2039,106 @@ true true - + make - -j5 - wrap.testSpirit.run + -j8 + testNonlinearOptimizer.run true true true - + make - -j5 - wrap.testWrap.run + -j2 + testRot3.run true true true - + make - -j5 - check.wrap + -j2 + testRot2.run true true true - + make - -j5 - wrap_gtsam + -j2 + testPose3.run true true true - + make - -j5 - wrap + -j2 + timeRot3.run + true + true + true + + + make + -j2 + testPose2.run + true + true + true + + + make + -j2 + testCal3_S2.run + true + true + true + + + make + -j2 + testSimpleCamera.run + true + true + true + + + make + -j2 + testHomography2.run + true + true + true + + + make + -j2 + testCalibratedCamera.run + true + true + true + + + make + -j2 + check + true + true + true + + + make + -j2 + clean + true + true + true + + + make + -j2 + testPoint2.run true true true @@ -2217,6 +2182,46 @@ false true + + make + -j5 + wrap.testSpirit.run + true + true + true + + + make + -j5 + wrap.testWrap.run + true + true + true + + + make + -j5 + check.wrap + true + true + true + + + make + -j5 + wrap_gtsam + true + true + true + + + make + -j5 + wrap + true + true + true + diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 2b9865ada..5295ce9ff 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -68,9 +68,9 @@ void LevenbergMarquardtOptimizer::iterate() { // Optimize VectorValues delta; if(params_.elimination == 
SuccessiveLinearizationParams::MULTIFRONTAL) - delta = GaussianJunctionTree(*linear).optimize(eliminationMethod); + delta = GaussianJunctionTree(dampedSystem).optimize(eliminationMethod); else if(params_.elimination == SuccessiveLinearizationParams::SEQUENTIAL) - delta = gtsam::optimize(*EliminationTree::Create(*linear)->eliminate(eliminationMethod)); + delta = gtsam::optimize(*EliminationTree::Create(dampedSystem)->eliminate(eliminationMethod)); else throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination"); diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 7173253b8..80754ce02 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -233,6 +233,29 @@ TEST(NonlinearOptimizer, NullFactor) { DOUBLES_EQUAL(0,fg.error(actual3),tol); } +/* ************************************************************************* */ +TEST(NonlinearOptimizer, MoreOptimization) { + + NonlinearFactorGraph fg; + fg.add(PriorFactor(0, Pose2(0,0,0), sharedSigma(3,1))); + fg.add(BetweenFactor(0, 1, Pose2(1,0,M_PI/2), sharedSigma(3,1))); + fg.add(BetweenFactor(1, 2, Pose2(1,0,M_PI/2), sharedSigma(3,1))); + + Values init; + init.insert(0, Pose2(3,4,-M_PI)); + init.insert(1, Pose2(10,2,-M_PI)); + init.insert(2, Pose2(11,7,-M_PI)); + + Values expected; + expected.insert(0, Pose2(0,0,0)); + expected.insert(1, Pose2(1,0,M_PI/2)); + expected.insert(2, Pose2(1,1,M_PI)); + + // Try LM and Dogleg + EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(fg, init).optimize())); + EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize())); +} + /* ************************************************************************* */ int main() { TestResult tr; From 7b183d1237ba108172edc0f4bee954d89f3233e5 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Tue, 15 May 2012 13:33:32 +0000 Subject: [PATCH 57/57] Renamed derived optimizer verbosity parameters to start with 'verbosity' to make more auto-complete friendly --- examples/vSLAMexample/vSFMexample.cpp | 2 +- gtsam/nonlinear/DoglegOptimizer.cpp | 2 +- gtsam/nonlinear/DoglegOptimizer.h | 6 +++--- gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp | 2 +- gtsam/nonlinear/LevenbergMarquardtOptimizer.h | 6 +++--- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/vSLAMexample/vSFMexample.cpp b/examples/vSLAMexample/vSFMexample.cpp index 6edd06465..8774b79f3 100644 --- a/examples/vSLAMexample/vSFMexample.cpp +++ b/examples/vSLAMexample/vSFMexample.cpp @@ -144,7 +144,7 @@ int main(int argc, char* argv[]) { // Optimize the graph cout << "*******************************************************" << endl; LevenbergMarquardtParams params; - params.lmVerbosity = LevenbergMarquardtParams::DAMPED; + params.verbosityLM = LevenbergMarquardtParams::DAMPED; visualSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimates, params).optimize(); // Print final results diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp index 374ef0c6b..84090d666 100644 --- a/gtsam/nonlinear/DoglegOptimizer.cpp +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -37,7 +37,7 @@ void DoglegOptimizer::iterate(void) { GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction(); // Pull out parameters we'll use - const bool dlVerbose = (params_.dlVerbosity > DoglegParams::SILENT); + const bool dlVerbose = (params_.verbosityDL > DoglegParams::SILENT); // Do Dogleg iteration with either Multifrontal or Sequential elimination 
DoglegOptimizerImpl::IterationResult result; diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h index 69d0503d7..7edb43ca9 100644 --- a/gtsam/nonlinear/DoglegOptimizer.h +++ b/gtsam/nonlinear/DoglegOptimizer.h @@ -32,16 +32,16 @@ class DoglegOptimizer; class DoglegParams : public SuccessiveLinearizationParams { public: /** See DoglegParams::dlVerbosity */ - enum DLVerbosity { + enum VerbosityDL { SILENT, VERBOSE }; double deltaInitial; ///< The initial trust region radius (default: 1.0) - DLVerbosity dlVerbosity; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity + VerbosityDL verbosityDL; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity DoglegParams() : - deltaInitial(1.0), dlVerbosity(SILENT) {} + deltaInitial(1.0), verbosityDL(SILENT) {} virtual ~DoglegParams() {} diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp index 5295ce9ff..1b19cfe75 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp @@ -37,7 +37,7 @@ void LevenbergMarquardtOptimizer::iterate() { // Pull out parameters we'll use const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity; - const LevenbergMarquardtParams::LMVerbosity lmVerbosity = params_.lmVerbosity; + const LevenbergMarquardtParams::VerbosityLM lmVerbosity = params_.verbosityLM; // Keep increasing lambda until we make make progress while(true) { diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h index a03140166..c1fe55c26 100644 --- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -32,7 +32,7 @@ class LevenbergMarquardtOptimizer; class LevenbergMarquardtParams : public SuccessiveLinearizationParams { public: /** See LevenbergMarquardtParams::lmVerbosity */ - enum LMVerbosity { + enum VerbosityLM { SILENT, LAMBDA, TRYLAMBDA, @@ -44,10 +44,10 @@ public: double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5) double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0) double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5) - LMVerbosity lmVerbosity; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity + VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity LevenbergMarquardtParams() : - lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), lmVerbosity(SILENT) {} + lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM(SILENT) {} virtual ~LevenbergMarquardtParams() {}
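Putting the pieces of this series together, a typical client-side flow with the renamed verbosity field and the new Marginals interface looks roughly as follows. This is a sketch only: `graph` and `initialEstimate` are placeholders for a problem built as in testMarginals.cpp, and std::vector<Key> is assumed as the element type of the joint-marginal argument.

    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
    #include <gtsam/nonlinear/Marginals.h>
    using namespace gtsam;

    // ... build `graph` (NonlinearFactorGraph) and `initialEstimate` (Values) ...

    // Optimize; verbosityLM replaces the old lmVerbosity field.
    LevenbergMarquardtParams params;
    params.verbosityLM = LevenbergMarquardtParams::DAMPED;
    Values result = LevenbergMarquardtOptimizer(graph, initialEstimate, params).optimize();

    // Per-variable marginals at the solution.
    Marginals marginals(graph, result, Marginals::CHOLESKY);   // or Marginals::QR
    Matrix x1Cov  = marginals.marginalCovariance(Symbol('x',1));
    Matrix x1Info = marginals.marginalInformation(Symbol('x',1));

    // Joint marginal over several variables; copy blocks into a Matrix if the
    // JointMarginal object will go out of scope.
    std::vector<Key> keys;            // element type assumed from the test usage
    keys.push_back(Symbol('x',1));
    keys.push_back(Symbol('l',2));
    JointMarginal joint = marginals.jointMarginalCovariance(keys);
    Matrix crossCov = joint(Symbol('x',1), Symbol('l',2));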