From d21c6e48137d1063a70338710a57e5d96890c034 Mon Sep 17 00:00:00 2001
From: Richard Roberts
Date: Fri, 22 Oct 2010 19:47:46 +0000
Subject: [PATCH] NonlinearOptimizer keeps the solver between iterations to
 prevent having to re-split the graph each iteration with SPCG

---
 linear/GaussianMultifrontalSolver.cpp | 14 +++++++
 linear/GaussianMultifrontalSolver.h   | 33 ++++++++++------
 linear/GaussianSequentialSolver.cpp   | 12 ++++++
 linear/GaussianSequentialSolver.h     | 18 ++++++++-
 nonlinear/NonlinearOptimizer-inl.h    | 26 ++++++++----
 nonlinear/NonlinearOptimizer.h        | 57 ++++++++++++++-------------
 6 files changed, 113 insertions(+), 47 deletions(-)

diff --git a/linear/GaussianMultifrontalSolver.cpp b/linear/GaussianMultifrontalSolver.cpp
index 6027cf27f..852880c44 100644
--- a/linear/GaussianMultifrontalSolver.cpp
+++ b/linear/GaussianMultifrontalSolver.cpp
@@ -26,6 +26,20 @@ namespace gtsam {
 GaussianMultifrontalSolver::GaussianMultifrontalSolver(const FactorGraph<GaussianFactor>& factorGraph) :
     Base(factorGraph) {}
 
+/* ************************************************************************* */
+GaussianMultifrontalSolver::shared_ptr
+GaussianMultifrontalSolver::Create(const FactorGraph<GaussianFactor>& factorGraph) {
+  return shared_ptr(new GaussianMultifrontalSolver(factorGraph));
+}
+
+/* ************************************************************************* */
+GaussianMultifrontalSolver::shared_ptr
+GaussianMultifrontalSolver::update(const FactorGraph<GaussianFactor>& factorGraph) const {
+  // We do not yet have code written to update the junction tree, so we just
+  // create a new solver.
+  return Create(factorGraph);
+}
+
 /* ************************************************************************* */
 BayesTree<GaussianConditional>::shared_ptr GaussianMultifrontalSolver::eliminate() const {
   return Base::eliminate();
diff --git a/linear/GaussianMultifrontalSolver.h b/linear/GaussianMultifrontalSolver.h
index a8ac17179..56a21c849 100644
--- a/linear/GaussianMultifrontalSolver.h
+++ b/linear/GaussianMultifrontalSolver.h
@@ -29,9 +29,9 @@
 
 namespace gtsam {
 
-/** This solver uses sequential variable elimination to solve a
- * GaussianFactorGraph, i.e. a sparse linear system. Underlying this is a
- * column elimination tree (inference/EliminationTree), see Gilbert 2001 BIT.
+/** This solver uses multifrontal elimination to solve a GaussianFactorGraph,
+ * i.e. a sparse linear system. Underlying this is a junction tree, which is
+ * eliminated into a Bayes tree.
  *
  * The elimination ordering is "baked in" to the variable indices at this
  * stage, i.e. elimination proceeds in order from '0'. A fill-reducing
@@ -40,21 +40,15 @@ namespace gtsam {
  * existing GaussianFactorGraph into a COLAMD ordering instead, this is done
  * when computing marginals).
 *
- * This is not the most efficient algorithm we provide, most efficient is the
- * MultifrontalSolver, which performs Multi-frontal QR factorization. However,
- * sequential variable elimination is easier to understand so this is a good
- * starting point to learn about these algorithms and our implementation.
- * Additionally, the first step of MFQR is symbolic sequential elimination.
- *
- * The EliminationTree recursively produces a BayesNet,
- * typedef'ed in linear/GaussianBayesNet, on which this class calls
- * optimize(...) to perform back-substitution.
+ * The JunctionTree recursively produces a BayesTree,
+ * on which this class calls optimize(...) to perform back-substitution.
 */
 class GaussianMultifrontalSolver : GenericMultifrontalSolver<GaussianFactor, GaussianJunctionTree> {
 
 protected:
 
   typedef GenericMultifrontalSolver<GaussianFactor, GaussianJunctionTree> Base;
+  typedef boost::shared_ptr<GaussianMultifrontalSolver> shared_ptr;
 
 public:
 
@@ -64,6 +58,21 @@ public:
    */
   GaussianMultifrontalSolver(const FactorGraph<GaussianFactor>& factorGraph);
 
+  /**
+   * Named constructor that returns a shared_ptr.  This builds the junction
+   * tree, which already does some of the symbolic work of elimination.
+   */
+  static shared_ptr Create(const FactorGraph<GaussianFactor>& factorGraph);
+
+  /**
+   * Return a new solver that solves the given factor graph, which must have
+   * the *same structure* as the one this solver solves.  For some solvers this
+   * is more efficient than constructing the solver from scratch.  This can be
+   * used in cases where the numerical values of the linear problem change,
+   * e.g. during iterative nonlinear optimization.
+   */
+  shared_ptr update(const FactorGraph<GaussianFactor>& factorGraph) const;
+
   /**
    * Eliminate the factor graph sequentially. Uses a column elimination tree
    * to recursively eliminate.
diff --git a/linear/GaussianSequentialSolver.cpp b/linear/GaussianSequentialSolver.cpp
index dcfba358a..b357152b0 100644
--- a/linear/GaussianSequentialSolver.cpp
+++ b/linear/GaussianSequentialSolver.cpp
@@ -26,6 +26,18 @@ namespace gtsam {
 GaussianSequentialSolver::GaussianSequentialSolver(const FactorGraph<GaussianFactor>& factorGraph) :
     Base(factorGraph) {}
 
+/* ************************************************************************* */
+GaussianSequentialSolver::shared_ptr GaussianSequentialSolver::Create(const FactorGraph<GaussianFactor>& factorGraph) {
+  return shared_ptr(new GaussianSequentialSolver(factorGraph));
+}
+
+/* ************************************************************************* */
+GaussianSequentialSolver::shared_ptr GaussianSequentialSolver::update(const FactorGraph<GaussianFactor>& factorGraph) const {
+  // We do not yet have code written to update the elimination tree, so we just
+  // create a new solver.
+  return Create(factorGraph);
+}
+
 /* ************************************************************************* */
 GaussianBayesNet::shared_ptr GaussianSequentialSolver::eliminate() const {
   return Base::eliminate();
diff --git a/linear/GaussianSequentialSolver.h b/linear/GaussianSequentialSolver.h
index 31065aff3..b2c3f72e6 100644
--- a/linear/GaussianSequentialSolver.h
+++ b/linear/GaussianSequentialSolver.h
@@ -45,7 +45,7 @@ namespace gtsam {
  * starting point to learn about these algorithms and our implementation.
  * Additionally, the first step of MFQR is symbolic sequential elimination.
  *
- * The EliminationTree recursively produces a BayesNet,
+ * The EliminationTree recursively produces a BayesNet,
  * typedef'ed in linear/GaussianBayesNet, on which this class calls
  * optimize(...) to perform back-substitution.
  */
 class GaussianSequentialSolver : GenericSequentialSolver<GaussianFactor> {
 
 protected:
 
   typedef GenericSequentialSolver<GaussianFactor> Base;
+  typedef boost::shared_ptr<GaussianSequentialSolver> shared_ptr;
 
 public:
 
@@ -63,6 +64,21 @@ public:
    */
   GaussianSequentialSolver(const FactorGraph<GaussianFactor>& factorGraph);
 
+  /**
+   * Named constructor to return a shared_ptr.  This builds the elimination
+   * tree, which already does some of the symbolic work of elimination.
+   */
+  static shared_ptr Create(const FactorGraph<GaussianFactor>& factorGraph);
+
+  /**
+   * Return a new solver that solves the given factor graph, which must have
+   * the *same structure* as the one this solver solves.  For some solvers this
+   * is more efficient than constructing the solver from scratch.  This can be
+   * used in cases where the numerical values of the linear problem change,
+   * e.g. during iterative nonlinear optimization.
+   */
+  shared_ptr update(const FactorGraph<GaussianFactor>& factorGraph) const;
+
   /**
    * Eliminate the factor graph sequentially. Uses a column elimination tree
    * to recursively eliminate.
diff --git a/nonlinear/NonlinearOptimizer-inl.h b/nonlinear/NonlinearOptimizer-inl.h
index af46fe9a7..f53b74468 100644
--- a/nonlinear/NonlinearOptimizer-inl.h
+++ b/nonlinear/NonlinearOptimizer-inl.h
@@ -102,7 +102,14 @@ namespace gtsam {
   NonlinearOptimizer NonlinearOptimizer::iterate(
       Parameters::verbosityLevel verbosity) const {
     // linearize and optimize
-    VectorValues delta = linearizeAndOptimizeForDelta();
+
+    boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
+    shared_solver newSolver = solver_;
+    if(newSolver)
+      newSolver = newSolver->update(*linearized);
+    else
+      newSolver.reset(new S(*linearized));
+    VectorValues delta = *newSolver->optimize();
 
     // maybe show output
     if (verbosity >= Parameters::DELTA)
@@ -115,7 +122,7 @@ namespace gtsam {
     if (verbosity >= Parameters::VALUES)
       newValues->print("newValues");
 
-    NonlinearOptimizer newOptimizer = newValues_(newValues);
+    NonlinearOptimizer newOptimizer = newValuesSolver_(newValues, newSolver);
 
     if (verbosity >= Parameters::ERROR)
       cout << "error: " << newOptimizer.error_ << endl;
@@ -177,7 +184,12 @@ namespace gtsam {
       damped.print("damped");
 
     // solve
-    VectorValues delta = *S(damped).optimize();
+    shared_solver newSolver = solver_;
+    if(newSolver)
+      newSolver = newSolver->update(damped);
+    else
+      newSolver.reset(new S(damped));
+    VectorValues delta = *newSolver->optimize();
 
     if (verbosity >= Parameters::TRYDELTA)
       delta.print("delta");
@@ -187,7 +199,7 @@ namespace gtsam {
     //  newValues->print("values");
 
     // create new optimization state with more adventurous lambda
-    NonlinearOptimizer next(newValuesNewLambda_(newValues, lambda_ / factor));
+    NonlinearOptimizer next(newValuesSolverLambda_(newValues, newSolver, lambda_ / factor));
 
     if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;
 
     if(lambdaMode >= Parameters::CAUTIOUS) {
@@ -199,7 +211,7 @@ namespace gtsam {
       // If we're cautious, see if the current lambda is better
       // todo: include stopping criterion here?
       if(lambdaMode == Parameters::CAUTIOUS) {
-        NonlinearOptimizer sameLambda(newValues_(newValues));
+        NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
         if(sameLambda.error_ <= next.error_)
           return sameLambda;
       }
@@ -212,7 +224,7 @@ namespace gtsam {
 
       // A more adventerous lambda was worse.  If we're cautious, try the same lambda.
       if(lambdaMode == Parameters::CAUTIOUS) {
-        NonlinearOptimizer sameLambda(newValues_(newValues));
+        NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
         if(sameLambda.error_ <= error_)
           return sameLambda;
       }
@@ -223,7 +235,7 @@ namespace gtsam {
 
     // TODO: can we avoid copying the values ?
     if(lambdaMode >= Parameters::BOUNDED && lambda_ >= 1.0e5) {
-      return NonlinearOptimizer(newValues_(newValues));
+      return NonlinearOptimizer(newValuesSolver_(newValues, newSolver));
     } else {
       NonlinearOptimizer cautious(newLambda_(lambda_ * factor));
       return cautious.try_lambda(linear, verbosity, factor, lambdaMode);
diff --git a/nonlinear/NonlinearOptimizer.h b/nonlinear/NonlinearOptimizer.h
index feeecc831..4df619378 100644
--- a/nonlinear/NonlinearOptimizer.h
+++ b/nonlinear/NonlinearOptimizer.h
@@ -37,7 +37,7 @@ namespace gtsam {
 
 /**
  * The class NonlinearOptimizer encapsulates an optimization state.
- * Typically it is instantiated with a NonlinearFactorGraph and an initial config
+ * Typically it is instantiated with a NonlinearFactorGraph and an initial values
  * and then one of the optimization routines is called. These recursively iterate
  * until convergence. All methods are functional and return a new state.
  *
@@ -67,10 +67,11 @@ namespace gtsam {
   class NonlinearOptimizer {
   public:
 
-    // For performance reasons in recursion, we store configs in a shared_ptr
+    // For performance reasons in recursion, we store values in a shared_ptr
     typedef boost::shared_ptr<const T> shared_values;
    typedef boost::shared_ptr<const G> shared_graph;
     typedef boost::shared_ptr<const Ordering> shared_ordering;
+    typedef boost::shared_ptr<const GS> shared_solver;
     typedef NonlinearOptimizationParameters Parameters;
 
   private:
@@ -87,9 +88,11 @@ namespace gtsam {
     const shared_values values_;
     const double error_;
 
+    // the variable ordering
     const shared_ordering ordering_;
+
     // the linear system solver
-    //const shared_solver solver_;
+    const shared_solver solver_;
 
     // keep current lambda for use within LM only
     // TODO: red flag, should we have an LM class ?
@@ -105,26 +108,26 @@ namespace gtsam {
     /**
      * Constructor that does not do any computation
      */
-    NonlinearOptimizer(shared_graph graph, shared_values config, shared_ordering ordering,
-        const double error, const double lambda, shared_dimensions dimensions): graph_(graph), values_(config),
-        error_(error), ordering_(ordering), lambda_(lambda), dimensions_(dimensions) {}
+    NonlinearOptimizer(shared_graph graph, shared_values values, const double error, shared_ordering ordering,
+        shared_solver solver, const double lambda, shared_dimensions dimensions): graph_(graph), values_(values),
+        error_(error), ordering_(ordering), solver_(solver), lambda_(lambda), dimensions_(dimensions) {}
 
     /** Create a new NonlinearOptimizer with a different lambda */
     This newLambda_(double newLambda) const {
-      return NonlinearOptimizer(graph_, values_, ordering_, error_, newLambda, dimensions_); }
+      return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, newLambda, dimensions_); }
 
-    This newValues_(shared_values newValues) const {
-      return NonlinearOptimizer(graph_, newValues, ordering_, graph_->error(*newValues), lambda_, dimensions_); }
+    This newValuesSolver_(shared_values newValues, shared_solver newSolver) const {
+      return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, lambda_, dimensions_); }
 
-    This newValuesNewLambda_(shared_values newValues, double newLambda) const {
-      return NonlinearOptimizer(graph_, newValues, ordering_, graph_->error(*newValues), newLambda, dimensions_); }
+    This newValuesSolverLambda_(shared_values newValues, shared_solver newSolver, double newLambda) const {
+      return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, newLambda, dimensions_); }
 
   public:
 
     /**
      * Constructor that evaluates new error
      */
-    NonlinearOptimizer(shared_graph graph, shared_values config, shared_ordering ordering,
+    NonlinearOptimizer(shared_graph graph, shared_values values, shared_ordering ordering,
         const double lambda = 1e-5);
 
     /**
      * Copy constructor
@@ -132,7 +135,7 @@ namespace gtsam {
      */
     NonlinearOptimizer(const NonlinearOptimizer &optimizer) :
       graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
-      ordering_(optimizer.ordering_), lambda_(optimizer.lambda_), dimensions_(optimizer.dimensions_) {}
+      ordering_(optimizer.ordering_), solver_(optimizer.solver_), lambda_(optimizer.lambda_), dimensions_(optimizer.dimensions_) {}
 
     /**
      * Return current error
 
@@ -208,21 +211,21 @@ namespace gtsam {
     /**
      * Static interface to LM optimization using default ordering and thresholds
      * @param graph      Nonlinear factor graph to optimize
-     * @param config     Initial config
+     * @param values     Initial values
      * @param verbosity  Integer specifying how much output to provide
      * @return           an optimized values structure
      */
-    static shared_values optimizeLM(shared_graph graph, shared_values config,
+    static shared_values optimizeLM(shared_graph graph, shared_values values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
 
       // Use a variable ordering from COLAMD
-      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
+      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
       double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
 
       // initial optimization state is the same in both cases tested
-      GS solver(*graph->linearize(*config, *ordering));
-      NonlinearOptimizer optimizer(graph, config, ordering);
+      GS solver(*graph->linearize(*values, *ordering));
+      NonlinearOptimizer optimizer(graph, values, ordering);
 
       // Levenberg-Marquardt
       NonlinearOptimizer result = optimizer.levenbergMarquardt(relativeThreshold,
@@ -233,27 +236,27 @@ namespace gtsam {
     /**
      * Static interface to LM optimization (no shared_ptr arguments) - see above
      */
-    inline static shared_values optimizeLM(const G& graph, const T& config,
+    inline static shared_values optimizeLM(const G& graph, const T& values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
       return optimizeLM(boost::make_shared<G>(graph),
-          boost::make_shared<T>(config), verbosity);
+          boost::make_shared<T>(values), verbosity);
     }
 
     /**
      * Static interface to GN optimization using default ordering and thresholds
      * @param graph      Nonlinear factor graph to optimize
-     * @param config     Initial config
+     * @param values     Initial values
      * @param verbosity  Integer specifying how much output to provide
      * @return           an optimized values structure
      */
-    static shared_values optimizeGN(shared_graph graph, shared_values config,
+    static shared_values optimizeGN(shared_graph graph, shared_values values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
-      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
+      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
       double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
 
       // initial optimization state is the same in both cases tested
-      GS solver(*graph->linearize(*config, *ordering));
-      NonlinearOptimizer optimizer(graph, config, ordering);
+      GS solver(*graph->linearize(*values, *ordering));
+      NonlinearOptimizer optimizer(graph, values, ordering);
 
       // Gauss-Newton
       NonlinearOptimizer result = optimizer.gaussNewton(relativeThreshold,
@@ -264,10 +267,10 @@ namespace gtsam {
     /**
      * Static interface to GN optimization (no shared_ptr arguments) - see above
      */
-    inline static shared_values optimizeGN(const G& graph, const T& config,
+    inline static shared_values optimizeGN(const G& graph, const T& values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
       return optimizeGN(boost::make_shared<G>(graph),
-          boost::make_shared<T>(config), verbosity);
+          boost::make_shared<T>(values), verbosity);
     }
   };
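For context, the usage pattern this patch enables looks roughly like the sketch below. This is illustrative only, not code from the patch: relinearizeAt() is a hypothetical stand-in for the graph_->linearize(*values_, *ordering_) call in NonlinearOptimizer::iterate above, and the solver API (Create, update, optimize) is assumed to behave exactly as declared in the headers. The point is that the solver object now survives across iterations, so a solver such as SPCG can keep its expensive graph split instead of recomputing it at every relinearization.

```cpp
// Illustrative sketch only -- assumes the era-appropriate GTSAM API declared
// in the patch above.  relinearizeAt() is a hypothetical helper returning the
// factor graph linearized at the current nonlinear estimate.
#include <boost/shared_ptr.hpp>
#include "linear/GaussianSequentialSolver.h"

using namespace gtsam;

// Hypothetical: relinearize the nonlinear problem at iteration i.
boost::shared_ptr<FactorGraph<GaussianFactor> > relinearizeAt(size_t i);

void optimizeSketch(size_t maxIterations) {
  // The solver pointer lives outside the loop, so symbolic work is reused.
  boost::shared_ptr<GaussianSequentialSolver> solver;
  for (size_t i = 0; i < maxIterations; ++i) {
    boost::shared_ptr<FactorGraph<GaussianFactor> > linearized = relinearizeAt(i);
    if (solver)
      solver = solver->update(*linearized);  // same structure: cheap refresh
    else
      solver = GaussianSequentialSolver::Create(*linearized);  // first iteration
    VectorValues delta = *solver->optimize();  // solve this iteration's step
    // ... apply delta to the nonlinear values and test for convergence ...
  }
}
```

This mirrors the branch added to NonlinearOptimizer::iterate and try_lambda: update() is called when a solver already exists, and a solver is constructed from scratch only on the first iteration.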