From 75bd1689dfa7056744178edea5a13c5d560c9552 Mon Sep 17 00:00:00 2001
From: Richard Roberts
Date: Mon, 14 May 2012 19:10:02 +0000
Subject: [PATCH] Changes in progress

---
 gtsam/nonlinear/DoglegOptimizer.cpp           |  12 +-
 gtsam/nonlinear/DoglegOptimizer.h             |  31 +++-
 gtsam/nonlinear/GaussNewtonOptimizer.cpp      |   3 +-
 gtsam/nonlinear/GaussNewtonOptimizer.h        |  20 ++-
 .../nonlinear/LevenbergMarquardtOptimizer.cpp |  10 +-
 gtsam/nonlinear/LevenbergMarquardtOptimizer.h |  27 +++-
 gtsam/nonlinear/NonlinearOptimizer.cpp        |   2 +-
 gtsam/nonlinear/NonlinearOptimizer.h          | 144 +++++++++---------
 gtsam/slam/pose2SLAM.h                        |   2 +-
 9 files changed, 148 insertions(+), 103 deletions(-)

diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp
index 9d52fb7c1..4eeac0296 100644
--- a/gtsam/nonlinear/DoglegOptimizer.cpp
+++ b/gtsam/nonlinear/DoglegOptimizer.cpp
@@ -27,11 +27,11 @@ using namespace std;
 namespace gtsam {

 /* ************************************************************************* */
-void DoglegOptimizer::iterate(void) const {
+void DoglegOptimizer::iterate(void) {

   // Linearize graph
   const Ordering& ordering = *params_.ordering;
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering);

   // Check whether to use QR
   bool useQR;
@@ -43,18 +43,18 @@ void DoglegOptimizer::iterate(void) const {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization");

   // Pull out parameters we'll use
-  const bool dlVerbose = (params_->dlVerbosity > DoglegParams::SILENT);
+  const bool dlVerbose = (params_.dlVerbosity > DoglegParams::SILENT);

   // Do Dogleg iteration with either Multifrontal or Sequential elimination
   DoglegOptimizerImpl::IterationResult result;

   if(params_.elimination == DoglegParams::MULTIFRONTAL) {
     GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose);

   } else if(params_.elimination == DoglegParams::SEQUENTIAL) {
     GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);

   } else {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination");
@@ -66,7 +66,7 @@ void DoglegOptimizer::iterate(void) const {
   // Create new state with new values and new error
   state_.values = state_.values.retract(result.dx_d, ordering);
   state_.error = result.f_error;
-  state_.delta = result.delta;
+  state_.Delta = result.Delta;
   ++state_.iterations;
 }
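The Delta value threaded through this function is the dogleg trust-region radius; DoglegOptimizerImpl::Iterate grows or shrinks it according to how well the linearized model predicted the actual error decrease. The standalone sketch below shows the textbook gain-ratio rule of that kind; the 0.25/0.75 thresholds and the growth factor are illustrative constants, not GTSAM's exact ones.

    #include <algorithm>
    #include <iostream>

    // Schematic trust-region update in the spirit of DoglegOptimizerImpl:
    // compare the actual error decrease against the decrease predicted by
    // the linear model, then grow or shrink the radius Delta accordingly.
    double updateDelta(double Delta, double oldError, double newError, double predictedDecrease) {
      const double rho = (oldError - newError) / predictedDecrease;  // gain ratio
      if(rho > 0.75)
        Delta = std::min(2.0 * Delta, 1e5);  // model agrees well: expand the region
      else if(rho < 0.25)
        Delta *= 0.5;                        // model agrees poorly: contract it
      return Delta;                          // in between: leave Delta unchanged
    }

    int main() {
      double Delta = 1.0;
      Delta = updateDelta(Delta, 10.0, 6.0, 4.2);         // rho ~ 0.95: good step
      std::cout << "new Delta = " << Delta << std::endl;  // prints 2
    }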
diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h
index 1c6f3cb6f..44813befa 100644
--- a/gtsam/nonlinear/DoglegOptimizer.h
+++ b/gtsam/nonlinear/DoglegOptimizer.h
@@ -22,6 +22,8 @@
 namespace gtsam {

+class DoglegOptimizer;
+
 /** Parameters for Dogleg optimization.  Note that this parameter
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms.  This class also contains
@@ -53,17 +55,26 @@ public:
 /**
  * State for DoglegOptimizer
  */
-class DoglegState : public SuccessiveLinearizationState {
+class DoglegState : public NonlinearOptimizerState {
 public:

-  double delta;
+  double Delta;
+
+  DoglegState() {}
+
+  virtual ~DoglegState() {}
+
+protected:
+  DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations), Delta(params.deltaInitial) {}
+
+  friend class DoglegOptimizer;
 };

 /**
  * This class performs Dogleg nonlinear optimization
  */
-class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
+class DoglegOptimizer : public NonlinearOptimizer {

 public:

@@ -82,7 +93,7 @@ public:
    */
   DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const DoglegParams& params = DoglegParams()) :
-    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues, params_) {}

   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.  For convenience this
@@ -93,8 +104,9 @@ public:
    * @param ordering The variable elimination ordering
    */
   DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
-    NonlinearOptimizer(graph), state_(graph, initialValues) {
-    *params_.ordering = ordering; }
+    NonlinearOptimizer(graph) {
+    *params_.ordering = ordering;
+    state_ = DoglegState(graph, initialValues, params_); }

   /// @}

@@ -108,7 +120,10 @@ public:
    * containing the updated variable assignments, which may be retrieved with
    * values().
    */
-  virtual void iterate() const;
+  virtual void iterate();

   /** Access the parameters */
   const DoglegParams& params() const { return params_; }

   /** Access the last state */
   const DoglegState& state() const { return state_; }

+  /** Access the current trust region radius Delta */
+  double Delta() const { return state_.Delta; }
+
   /// @}

 protected:

@@ -131,7 +146,7 @@ protected:
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
   DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
-      params.ordering = graph.orderingCOLAMD(values);
+      params.ordering = *graph.orderingCOLAMD(values);
     return params;
   }
 };
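Against the declarations in this header, client code would look roughly like the sketch below. It assumes a NonlinearFactorGraph and initial Values built elsewhere; deltaInitial is the DoglegParams field read by the new protected DoglegState constructor.

    #include <gtsam/nonlinear/DoglegOptimizer.h>
    #include <iostream>

    using namespace gtsam;

    // Sketch only: `graph` and `initial` are assumed to come from elsewhere.
    Values solveWithDogleg(const NonlinearFactorGraph& graph, const Values& initial) {
      DoglegParams params;
      params.deltaInitial = 1.0;  // starting trust-region radius, copied into DoglegState
      // params.ordering left unset: ensureHasOrdering() computes a COLAMD ordering

      DoglegOptimizer optimizer(graph, initial, params);
      Values result = optimizer.optimize();  // runs defaultOptimize() internally

      std::cout << "final Delta: " << optimizer.Delta() << "\n"
                << "iterations:  " << optimizer.state().iterations << std::endl;
      return result;
    }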
diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp
index 02960f569..39a4d32d5 100644
--- a/gtsam/nonlinear/GaussNewtonOptimizer.cpp
+++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp
@@ -25,7 +25,7 @@ using namespace std;
 namespace gtsam {

 /* ************************************************************************* */
-void GaussNewtonOptimizer::iterate() const {
+void GaussNewtonOptimizer::iterate() {

   const NonlinearOptimizerState& current = state_;

@@ -54,7 +54,6 @@ void GaussNewtonOptimizer::iterate() const {
   if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta");

   // Create new state with new values and new error
-  NonlinearOptimizerState newState;
   state_.values = current.values.retract(*delta, *params_.ordering);
   state_.error = graph_.error(state_.values);
   ++ state_.iterations;

diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h
index afc0fe30d..f7c57c3e9 100644
--- a/gtsam/nonlinear/GaussNewtonOptimizer.h
+++ b/gtsam/nonlinear/GaussNewtonOptimizer.h
@@ -22,12 +22,22 @@
 namespace gtsam {

+class GaussNewtonOptimizer;
+
 /** Parameters for Gauss-Newton optimization, inherits from
  * NonlinearOptimizerParams.
  */
 class GaussNewtonParams : public SuccessiveLinearizationParams {
 };

+class GaussNewtonState : public NonlinearOptimizerState {
+protected:
+  GaussNewtonState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations) {}
+
+  friend class GaussNewtonOptimizer;
+};
+
 /**
  * This class performs Gauss-Newton nonlinear optimization
  */
@@ -48,7 +58,7 @@ public:
    */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const GaussNewtonParams& params = GaussNewtonParams()) :
-    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues) {}

   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.  For convenience this
@@ -74,20 +84,20 @@ public:
    * containing the updated variable assignments, which may be retrieved with
    * values().
    */
-  virtual void iterate() const;
+  virtual void iterate();

   /** Access the parameters */
   const GaussNewtonParams& params() const { return params_; }

   /** Access the last state */
-  const NonlinearOptimizerState& state() const { return state_; }
+  const GaussNewtonState& state() const { return state_; }

   /// @}

 protected:

   GaussNewtonParams params_;
-  NonlinearOptimizerState state_;
+  GaussNewtonState state_;

   /** Access the parameters (base class version) */
   virtual const NonlinearOptimizerParams& _params() const { return params_; }

@@ -98,7 +108,7 @@ protected:
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
   GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
-      params.ordering = graph.orderingCOLAMD(values);
+      params.ordering = *graph.orderingCOLAMD(values);
     return params;
   }
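GaussNewtonOptimizer::iterate() is the plain linearize/solve/retract cycle. Stripped of the factor-graph machinery it reduces to solving the normal equations of the linearized residual; here is a self-contained toy with one scalar parameter (no GTSAM types, hypothetical model y = exp(a*x)).

    #include <cmath>
    #include <cstdio>

    // One Gauss-Newton iteration for fitting y_i = exp(a * x_i):
    // linearize the residuals at the current estimate, solve the normal
    // equations for the update, then "retract" (here: plain addition).
    double gaussNewtonStep(double a, const double* x, const double* y, int n) {
      double JtJ = 0.0, Jtr = 0.0;
      for(int i = 0; i < n; ++i) {
        const double pred = std::exp(a * x[i]);
        const double r = pred - y[i];   // residual
        const double J = x[i] * pred;   // d(residual)/d(a)
        JtJ += J * J;
        Jtr += J * r;
      }
      const double delta = -Jtr / JtJ;  // solve the 1x1 normal equations
      return a + delta;                 // retract: vector-space update
    }

    int main() {
      const double x[] = {0.0, 1.0, 2.0};
      const double y[] = {1.0, std::exp(0.5), std::exp(1.0)};
      double a = 0.2;                   // initial estimate; true value is 0.5
      for(int it = 0; it < 5; ++it) a = gaussNewtonStep(a, x, y, 3);
      std::printf("estimated a = %f\n", a);  // converges to ~0.5
    }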
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
index fc25a697d..50d87a0d3 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -27,10 +27,10 @@ using namespace std;
 namespace gtsam {

 /* ************************************************************************* */
-void LevenbergMarquardtOptimizer::iterate() const {
+void LevenbergMarquardtOptimizer::iterate() {

   // Linearize graph
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, *params_.ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering);

   // Check whether to use QR
   bool useQR;
@@ -48,13 +48,13 @@ void LevenbergMarquardtOptimizer::iterate() const {
   // Keep increasing lambda until we make progress
   while(true) {
     if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
-      cout << "trying lambda = " << lambda << endl;
+      cout << "trying lambda = " << state_.lambda << endl;

     // Add prior-factors
     // TODO: replace this dampening with a backsubstitution approach
     GaussianFactorGraph dampedSystem(*linear);
     {
-      double sigma = 1.0 / sqrt(lambda);
+      double sigma = 1.0 / sqrt(state_.lambda);
       dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
       // for each of the variables, add a prior
       for(Index j=0; j<dimensions_.size(); ++j) {
@@ ... @@
-      if(lambda >= params_.lambdaUpperBound) {
+      if(state_.lambda >= params_.lambdaUpperBound) {
         if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
           cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
         break;
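The damping block above is the classical Levenberg-Marquardt augmentation: a zero-mean prior with sigma = 1/sqrt(lambda) on each variable contributes lambda*I to the Gauss-Newton Hessian. The standalone scalar sketch below mirrors the increase-lambda-until-progress loop, including the lambdaUpperBound give-up; the toy residual and the constants are made up.

    #include <cmath>
    #include <cstdio>

    // Toy cost f(v) = 0.5 * r(v)^2 with residual r(v) = v*v - 2 (root sqrt(2)).
    double error(double v) { double r = v * v - 2.0; return 0.5 * r * r; }

    int main() {
      double v = 3.0, lambda = 1e-5;
      const double lambdaFactor = 10.0, lambdaUpperBound = 1e5;

      for(int it = 0; it < 20; ++it) {
        const double r = v * v - 2.0, J = 2.0 * v;
        while(true) {
          // Damped normal equations: (J^T J + lambda) * delta = -J^T r.
          // Adding a prior with sigma = 1/sqrt(lambda) has the same effect.
          const double delta = -(J * r) / (J * J + lambda);
          if(error(v + delta) <= error(v)) {     // progress: accept, relax lambda
            v += delta;
            lambda /= lambdaFactor;
            break;
          }
          lambda *= lambdaFactor;                // no progress: damp harder
          if(lambda >= lambdaUpperBound) break;  // give up, as in iterate() above
        }
      }
      std::printf("v = %f (sqrt(2) = %f)\n", v, std::sqrt(2.0));
    }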
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h
index c522d0a82..f2e2e4fc7 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h
@@ -22,6 +22,8 @@
 namespace gtsam {

+class LevenbergMarquardtOptimizer;
+
 /** Parameters for Levenberg-Marquardt optimization.  Note that this parameter
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms.  This class also contains
  * all of those parameters.
@@ -66,6 +68,15 @@ public:

   double lambda;

+  LevenbergMarquardtState() {}
+
+  virtual ~LevenbergMarquardtState() {}
+
+protected:
+  LevenbergMarquardtState(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, initialValues, iterations), lambda(params.lambdaInitial) {}
+
+  friend class LevenbergMarquardtOptimizer;
 };

 /**
@@ -90,8 +101,8 @@ public:
    */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
-    NonlinearOptimizer(graph), params_(ensureHasOrdering(params)),
-    state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {}
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)),
+    state_(graph, initialValues, params_), dimensions_(initialValues.dims(*params_.ordering)) {}

   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters.  For convenience this
@@ -102,8 +113,12 @@ public:
    * @param ordering The variable elimination ordering
    */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
-    NonlinearOptimizer(graph), state_(graph, initialValues), dimensions_(initialValues.dims(ordering)) {
-    *params_.ordering = ordering; }
+    NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) {
+    *params_.ordering = ordering;
+    state_ = LevenbergMarquardtState(graph, initialValues, params_); }
+
+  /** Access the current damping value */
+  double lambda() const { return state_.lambda; }

   /// @}

@@ -117,7 +132,7 @@ public:
    * containing the updated variable assignments, which may be retrieved with
    * values().
    */
-  virtual void iterate() const;
+  virtual void iterate();

   /** Access the parameters */
   const LevenbergMarquardtParams& params() const { return params_; }

   /** Access the last state */
   const LevenbergMarquardtState& state() const { return state_; }

@@ -142,7 +157,7 @@ protected:
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
   LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
-      params.ordering = graph.orderingCOLAMD(values);
+      params.ordering = *graph.orderingCOLAMD(values);
     return params;
   }
 };

diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp
index c80714d34..76d1fe573 100644
--- a/gtsam/nonlinear/NonlinearOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearOptimizer.cpp
@@ -27,7 +27,7 @@ namespace gtsam {

 /* ************************************************************************* */
 void NonlinearOptimizer::defaultOptimize() {

-  const NonlinearOptimizerParams& params = this->params();
+  const NonlinearOptimizerParams& params = this->_params();
   double currentError = this->error();

   // check if we're already close enough
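A pattern worth noting across DoglegState, GaussNewtonState, and LevenbergMarquardtState: a public default constructor plus a protected initializing constructor exposed to a single friend optimizer, so callers can hold and read a state but only the matching optimizer can construct one from a graph. A stripped-down sketch of the idiom (hypothetical Toy names, no GTSAM):

    #include <iostream>

    class ToyOptimizer;  // forward declaration, as in the headers above

    class ToyState {
    public:
      double error;
      unsigned int iterations;

      ToyState() : error(0.0), iterations(0) {}  // default-constructible member
      virtual ~ToyState() {}

    protected:
      // Only the friend optimizer may build a state from a problem definition.
      ToyState(double initialError) : error(initialError), iterations(0) {}

      friend class ToyOptimizer;
    };

    class ToyOptimizer {
    public:
      explicit ToyOptimizer(double initialError) : state_(initialError) {}

      void iterate() {  // non-const, as in this patch: mutate state in place
        state_.error *= 0.5;
        ++state_.iterations;
      }

      const ToyState& state() const { return state_; }

    private:
      ToyState state_;
    };

    int main() {
      ToyOptimizer opt(8.0);
      opt.iterate();
      opt.iterate();
      std::cout << opt.state().error << " after "
                << opt.state().iterations << " iterations\n";  // 2 after 2
    }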
+ */ +class NonlinearOptimizerParams { +public: + /** See NonlinearOptimizerParams::verbosity */ + enum Verbosity { + SILENT, + ERROR, + VALUES, + DELTA, + LINEAR + }; + + size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) + double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) + double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) + double errorTol; ///< The maximum total error to stop iterating (default 0.0) + Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) + + NonlinearOptimizerParams() : + maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), + errorTol(0.0), verbosity(SILENT) {} + + virtual void print(const std::string& str = "") const { + std::cout << str << "\n"; + std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; + std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; + std::cout << " total error threshold: " << errorTol << "\n"; + std::cout << " maximum iterations: " << maxIterations << "\n"; + std::cout << " verbosity level: " << verbosity << std::endl; + } + + virtual ~NonlinearOptimizerParams() {} +}; + + +/** + * Base class for a nonlinear optimization state, including the current estimate + * of the variable values, error, and number of iterations. Optimizers derived + * from NonlinearOptimizer usually also define a derived state class containing + * additional state specific to the algorithm (for example, Dogleg state + * contains the current trust region radius). + */ +class NonlinearOptimizerState { +public: + + /** The current estimate of the variable values. */ + Values values; + + /** The factor graph error on the current values. */ + double error; + + /** The number of optimization iterations performed. */ + unsigned int iterations; + + NonlinearOptimizerState() {} + + /** Virtual destructor */ + virtual ~NonlinearOptimizerState() {} + +protected: + NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : + values(values), error(graph.error(values)), iterations(iterations) {} + + NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) : + values(values), error(error), iterations(iterations) {} + + friend class NonlinearOptimizer; +}; + /** * This is the abstract interface for classes that can optimize for the * maximum-likelihood estimate of a NonlinearFactorGraph. @@ -113,7 +186,7 @@ public: * process, you may call iterate() and check_convergence() yourself, and if * needed modify the optimization state between iterations. */ - virtual const Values& optimize() const { return defaultOptimize(); } + virtual const Values& optimize() { defaultOptimize(); return values(); } double error() const { return _state().error; } @@ -133,7 +206,7 @@ public: * containing the updated variable assignments, which may be retrieved with * values(). */ - virtual void iterate() const = 0; + virtual void iterate() = 0; /// @} @@ -155,73 +228,6 @@ protected: }; - -/** The common parameters for Nonlinear optimizers. Most optimizers - * deriving from NonlinearOptimizer also subclass the parameters. 
- */ -class NonlinearOptimizerParams { -public: - /** See NonlinearOptimizerParams::verbosity */ - enum Verbosity { - SILENT, - ERROR, - VALUES, - DELTA, - LINEAR - }; - - size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) - double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) - double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) - double errorTol; ///< The maximum total error to stop iterating (default 0.0) - Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) - - NonlinearOptimizerParams() : - maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), - errorTol(0.0), verbosity(SILENT) {} - - virtual void print(const std::string& str = "") const { - std::cout << str << "\n"; - std::cout << "relative decrease threshold: " << relativeErrorTol << "\n"; - std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n"; - std::cout << " total error threshold: " << errorTol << "\n"; - std::cout << " maximum iterations: " << maxIterations << "\n"; - std::cout << " verbosity level: " << verbosity << std::endl; - } - - virtual ~NonlinearOptimizerParams() {} -}; - - -/** - * Base class for a nonlinear optimization state, including the current estimate - * of the variable values, error, and number of iterations. Optimizers derived - * from NonlinearOptimizer usually also define a derived state class containing - * additional state specific to the algorithm (for example, Dogleg state - * contains the current trust region radius). - */ -class NonlinearOptimizerState { -public: - - /** The current estimate of the variable values. */ - Values values; - - /** The factor graph error on the current values. */ - double error; - - /** The number of optimization iterations performed. */ - unsigned int iterations; - - NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : - values(values), error(graph.error(values)), iterations(iterations) {} - - NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) : - values(values), error(error), iterations(iterations) {} - - /** Virtual destructor */ - virtual ~NonlinearOptimizerState() {} -}; - /** Check whether the relative error decrease is less than relativeErrorTreshold, * the absolute error decrease is less than absoluteErrorTreshold, or * the error itself is less than errorThreshold. diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index ccae01ebd..42bd8758c 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -97,7 +97,7 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - return LevenbergMarquardtOptimizer(*this).optimized(initialEstimate); + return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize(); } };