From 638d4f1978ca523b9e03dc780a4ac2b105cf3a05 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 5 Sep 2011 21:28:26 +0000 Subject: [PATCH] Documentation and variable renaming for clarity --- gtsam/nonlinear/NonlinearOptimization-inl.h | 8 ++-- gtsam/nonlinear/NonlinearOptimizer-inl.h | 45 ++++++++++++--------- 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/gtsam/nonlinear/NonlinearOptimization-inl.h b/gtsam/nonlinear/NonlinearOptimization-inl.h index 5f6afaecd..1f8d0dc17 100644 --- a/gtsam/nonlinear/NonlinearOptimization-inl.h +++ b/gtsam/nonlinear/NonlinearOptimization-inl.h @@ -39,13 +39,13 @@ namespace gtsam { // Use a variable ordering from COLAMD Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - // initial optimization state is the same in both cases tested + // Create a nonlinear Optimizer that uses a Sequential Solver typedef NonlinearOptimizer Optimizer; Optimizer optimizer(boost::make_shared(graph), boost::make_shared(initialEstimate), ordering, boost::make_shared(parameters)); - // choose nonlinear optimization method + // Now optimize using either LM or GN methods. 
if (useLM) return *optimizer.levenbergMarquardt().values(); else @@ -62,13 +62,13 @@ namespace gtsam { // Use a variable ordering from COLAMD Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - // initial optimization state is the same in both cases tested + // Create a nonlinear Optimizer that uses a Multifrontal Solver typedef NonlinearOptimizer Optimizer; Optimizer optimizer(boost::make_shared(graph), boost::make_shared(initialEstimate), ordering, boost::make_shared(parameters)); - // choose nonlinear optimization method + // now optimize using either LM or GN methods if (useLM) return *optimizer.levenbergMarquardt().values(); else diff --git a/gtsam/nonlinear/NonlinearOptimizer-inl.h b/gtsam/nonlinear/NonlinearOptimizer-inl.h index 1a828d468..15f39df1d 100644 --- a/gtsam/nonlinear/NonlinearOptimizer-inl.h +++ b/gtsam/nonlinear/NonlinearOptimizer-inl.h @@ -130,30 +130,31 @@ namespace gtsam { // Form damped system with given lambda, and return a new, more optimistic // optimizer if error decreased or iterate with a larger lambda if not. // TODO: in theory we can't infinitely recurse, but maybe we should put a max. 
- /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::try_lambda(const L& linear) { + // Reminder: the parameters are Graph type $G$, Values class type $T$, + // linear system class $L$, the non linear solver type $S$, and the writer type $W$ + template + NonlinearOptimizer NonlinearOptimizer::try_lambda(const L& linearSystem) { const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - double lambda = parameters_->lambda_ ; const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ; const double factor = parameters_->lambdaFactor_ ; + double lambda = parameters_->lambda_ ; if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED."); double next_error = error_; - shared_values next_values = values_; + // Keep increasing lambda until we make progress while(true) { if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl; // add prior-factors // TODO: replace this dampening with a backsubstitution approach - typename L::shared_ptr damped(new L(linear)); + typename L::shared_ptr dampedSystem(new L(linearSystem)); { double sigma = 1.0 / sqrt(lambda); - damped->reserve(damped->size() + dimensions_->size()); + dampedSystem->reserve(dampedSystem->size() + dimensions_->size()); // for each of the variables, add a prior for(Index j=0; jsize(); ++j) { size_t dim = (*dimensions_)[j]; @@ -161,22 +162,24 @@ namespace gtsam { Vector b = zero(dim); SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma); typename L::sharedFactor prior(new JacobianFactor(j, A, b, model)); - damped->push_back(prior); + dampedSystem->push_back(prior); } } - if (verbosity >= Parameters::DAMPED) damped->print("damped"); + if (verbosity >= Parameters::DAMPED) dampedSystem->print("damped"); - // solve + // Create a new solver using the damped linear system // FIXME: remove spcg specific code - if 
(spcg_solver_) spcg_solver_->replaceFactors(damped); + if (spcg_solver_) spcg_solver_->replaceFactors(dampedSystem); shared_solver solver = (spcg_solver_) ? spcg_solver_ : shared_solver( - new S(damped, structure_, parameters_->useQR_)); + new S(dampedSystem, structure_, parameters_->useQR_)); + + // Try solving try { VectorValues delta = *solver->optimize(); if (verbosity >= Parameters::TRYDELTA) delta.print("delta"); // update values - shared_values newValues(new C(values_->expmap(delta, *ordering_))); + shared_values newValues(new T(values_->expmap(delta, *ordering_))); // create new optimization state with more adventurous lambda double error = graph_->error(*newValues); @@ -219,9 +222,10 @@ namespace gtsam { /* ************************************************************************* */ // One iteration of Levenberg Marquardt - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::iterateLM() { + // Reminder: the parameters are Graph type $G$, Values class type $T$, + // linear system class $L$, the non linear solver type $S$, and the writer type $W$ + template + NonlinearOptimizer NonlinearOptimizer::iterateLM() { const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; const double lambda = parameters_->lambda_ ; @@ -243,10 +247,12 @@ namespace gtsam { } /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::levenbergMarquardt() { + // Reminder: the parameters are Graph type $G$, Values class type $T$, + // linear system class $L$, the non linear solver type $S$, and the writer type $W$ + template + NonlinearOptimizer NonlinearOptimizer::levenbergMarquardt() { - iterations_ = 0; + // Initialize bool converged = false; const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; @@ -262,6 +268,7 @@ namespace gtsam { if (iterations_ >= parameters_->maxIterations_) return *this; + // 
Iterative loop that implements Levenberg-Marquardt while (true) { double previous_error = error_; // do one iteration of LM