NonlinearOptimizer keeps the solver between iterations, avoiding the need to re-split the graph on every iteration with SPCG
parent 69c6d05ce1
commit d21c6e4813
@@ -26,6 +26,20 @@ namespace gtsam {
 GaussianMultifrontalSolver::GaussianMultifrontalSolver(const FactorGraph<GaussianFactor>& factorGraph) :
     Base(factorGraph) {}
 
+/* ************************************************************************* */
+GaussianMultifrontalSolver::shared_ptr
+GaussianMultifrontalSolver::Create(const FactorGraph<GaussianFactor>& factorGraph) {
+  return shared_ptr(new GaussianMultifrontalSolver(factorGraph));
+}
+
+/* ************************************************************************* */
+GaussianMultifrontalSolver::shared_ptr
+GaussianMultifrontalSolver::update(const FactorGraph<GaussianFactor>& factorGraph) const {
+  // We do not yet have code written to update the junction tree, so we just
+  // create a new solver.
+  return Create(factorGraph);
+}
+
 /* ************************************************************************* */
 BayesTree<GaussianConditional>::shared_ptr GaussianMultifrontalSolver::eliminate() const {
   return Base::eliminate();
@@ -29,9 +29,9 @@
 
 namespace gtsam {
 
-/** This solver uses sequential variable elimination to solve a
- * GaussianFactorGraph, i.e. a sparse linear system. Underlying this is a
- * column elimination tree (inference/EliminationTree), see Gilbert 2001 BIT.
+/** This solver uses multifrontal elimination to solve a GaussianFactorGraph,
+ * i.e. a sparse linear system. Underlying this is a junction tree, which is
+ * eliminated into a Bayes tree.
  *
  * The elimination ordering is "baked in" to the variable indices at this
  * stage, i.e. elimination proceeds in order from '0'. A fill-reducing
@@ -40,21 +40,15 @@ namespace gtsam {
  * existing GaussianFactorGraph into a COLAMD ordering instead, this is done
  * when computing marginals).
  *
- * This is not the most efficient algorithm we provide, most efficient is the
- * MultifrontalSolver, which performs Multi-frontal QR factorization. However,
- * sequential variable elimination is easier to understand so this is a good
- * starting point to learn about these algorithms and our implementation.
- * Additionally, the first step of MFQR is symbolic sequential elimination.
- *
- * The EliminationTree recursively produces a BayesNet<GaussianFactor>,
- * typedef'ed in linear/GaussianBayesNet, on which this class calls
- * optimize(...) to perform back-substitution.
+ * The JunctionTree recursively produces a BayesTree<GaussianConditional>,
+ * on which this class calls optimize(...) to perform back-substitution.
  */
 class GaussianMultifrontalSolver : GenericMultifrontalSolver<GaussianFactor, GaussianJunctionTree> {
 
 protected:
 
   typedef GenericMultifrontalSolver<GaussianFactor, GaussianJunctionTree> Base;
+  typedef boost::shared_ptr<const GaussianMultifrontalSolver> shared_ptr;
 
 public:
 
@@ -64,6 +58,21 @@ public:
    */
   GaussianMultifrontalSolver(const FactorGraph<GaussianFactor>& factorGraph);
 
+  /**
+   * Named constructor that returns a shared_ptr. This builds the junction
+   * tree, which already does some of the symbolic work of elimination.
+   */
+  static shared_ptr Create(const FactorGraph<GaussianFactor>& factorGraph);
+
+  /**
+   * Return a new solver that solves the given factor graph, which must have
+   * the *same structure* as the one this solver solves. For some solvers this
+   * is more efficient than constructing the solver from scratch. This can be
+   * used in cases where the numerical values of the linear problem change,
+   * e.g. during iterative nonlinear optimization.
+   */
+  shared_ptr update(const FactorGraph<GaussianFactor>& factorGraph) const;
+
   /**
    * Eliminate the factor graph sequentially. Uses a column elimination tree
    * to recursively eliminate.
@@ -26,6 +26,18 @@ namespace gtsam {
 GaussianSequentialSolver::GaussianSequentialSolver(const FactorGraph<GaussianFactor>& factorGraph) :
     Base(factorGraph) {}
 
+/* ************************************************************************* */
+GaussianSequentialSolver::shared_ptr GaussianSequentialSolver::Create(const FactorGraph<GaussianFactor>& factorGraph) {
+  return shared_ptr(new GaussianSequentialSolver(factorGraph));
+}
+
+/* ************************************************************************* */
+GaussianSequentialSolver::shared_ptr GaussianSequentialSolver::update(const FactorGraph<GaussianFactor>& factorGraph) const {
+  // We do not yet have code written to update the elimination tree, so we just
+  // create a new solver.
+  return Create(factorGraph);
+}
+
 /* ************************************************************************* */
 GaussianBayesNet::shared_ptr GaussianSequentialSolver::eliminate() const {
   return Base::eliminate();
@@ -45,7 +45,7 @@ namespace gtsam {
  * starting point to learn about these algorithms and our implementation.
  * Additionally, the first step of MFQR is symbolic sequential elimination.
  *
- * The EliminationTree recursively produces a BayesNet<GaussianFactor>,
+ * The EliminationTree recursively produces a BayesNet<GaussianConditional>,
  * typedef'ed in linear/GaussianBayesNet, on which this class calls
  * optimize(...) to perform back-substitution.
  */
@@ -54,6 +54,7 @@ class GaussianSequentialSolver : GenericSequentialSolver<GaussianFactor> {
 protected:
 
   typedef GenericSequentialSolver<GaussianFactor> Base;
+  typedef boost::shared_ptr<const GaussianSequentialSolver> shared_ptr;
 
 public:
 
@@ -63,6 +64,21 @@ public:
    */
   GaussianSequentialSolver(const FactorGraph<GaussianFactor>& factorGraph);
 
+  /**
+   * Named constructor to return a shared_ptr. This builds the elimination
+   * tree, which already does some of the symbolic work of elimination.
+   */
+  static shared_ptr Create(const FactorGraph<GaussianFactor>& factorGraph);
+
+  /**
+   * Return a new solver that solves the given factor graph, which must have
+   * the *same structure* as the one this solver solves. For some solvers this
+   * is more efficient than constructing the solver from scratch. This can be
+   * used in cases where the numerical values of the linear problem change,
+   * e.g. during iterative nonlinear optimization.
+   */
+  shared_ptr update(const FactorGraph<GaussianFactor>& factorGraph) const;
+
   /**
    * Eliminate the factor graph sequentially. Uses a column elimination tree
    * to recursively eliminate.
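Note: the sequential solver gets the identical Create/update interface; the two classes differ in the elimination product, a Bayes net from the elimination tree here versus a Bayes tree from the junction tree above. A hedged sketch, with lin again a hypothetical FactorGraph<GaussianFactor>:

    // Sequential elimination uses the elimination tree and yields a
    // GaussianBayesNet, on which optimize(...) performs back-substitution.
    GaussianSequentialSolver::shared_ptr solver =
        GaussianSequentialSolver::Create(lin);
    GaussianBayesNet::shared_ptr bayesNet = solver->eliminate();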
@@ -102,7 +102,14 @@ namespace gtsam {
   NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate(
       Parameters::verbosityLevel verbosity) const {
     // linearize and optimize
-    VectorValues delta = linearizeAndOptimizeForDelta();
 
+    boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
+    shared_solver newSolver = solver_;
+    if(newSolver)
+      newSolver = newSolver->update(*linearized);
+    else
+      newSolver.reset(new S(*linearized));
+    VectorValues delta = *newSolver->optimize();
+
     // maybe show output
     if (verbosity >= Parameters::DELTA)
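Note: this hunk is the heart of the commit. iterate() now carries solver_ from one optimizer state to the next and calls update() on the relinearized system instead of constructing a fresh solver. For a solver such as SPCG, which splits the graph before solving, that split depends only on the graph structure, and the structure does not change between nonlinear iterations, so it need not be recomputed. Conceptually, the loop this enables looks like the following pseudocode (same names as the diff; the update step is omitted):

    shared_solver solver;                          // empty before the first iteration
    while (!converged) {                           // pseudocode condition
      boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_);
      if (solver)
        solver = solver->update(*linear);          // keep the structure-dependent setup
      else
        solver.reset(new S(*linear));              // first iteration: build from scratch
      VectorValues delta = *solver->optimize();
      // ... apply delta to the values and re-check convergence (omitted) ...
    }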
@@ -115,7 +122,7 @@ namespace gtsam {
     if (verbosity >= Parameters::VALUES)
       newValues->print("newValues");
 
-    NonlinearOptimizer newOptimizer = newValues_(newValues);
+    NonlinearOptimizer newOptimizer = newValuesSolver_(newValues, newSolver);
 
     if (verbosity >= Parameters::ERROR)
       cout << "error: " << newOptimizer.error_ << endl;
@@ -177,7 +184,12 @@ namespace gtsam {
       damped.print("damped");
 
     // solve
-    VectorValues delta = *S(damped).optimize();
+    shared_solver newSolver = solver_;
+    if(newSolver)
+      newSolver = newSolver->update(damped);
+    else
+      newSolver.reset(new S(damped));
+    VectorValues delta = *newSolver->optimize();
     if (verbosity >= Parameters::TRYDELTA)
       delta.print("delta");
 
@@ -187,7 +199,7 @@ namespace gtsam {
     // newValues->print("values");
 
     // create new optimization state with more adventurous lambda
-    NonlinearOptimizer next(newValuesNewLambda_(newValues, lambda_ / factor));
+    NonlinearOptimizer next(newValuesSolverLambda_(newValues, newSolver, lambda_ / factor));
     if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;
 
     if(lambdaMode >= Parameters::CAUTIOUS) {
@@ -199,7 +211,7 @@ namespace gtsam {
       // If we're cautious, see if the current lambda is better
       // todo: include stopping criterion here?
       if(lambdaMode == Parameters::CAUTIOUS) {
-        NonlinearOptimizer sameLambda(newValues_(newValues));
+        NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
         if(sameLambda.error_ <= next.error_)
           return sameLambda;
       }
@@ -212,7 +224,7 @@ namespace gtsam {
 
       // A more adventerous lambda was worse. If we're cautious, try the same lambda.
       if(lambdaMode == Parameters::CAUTIOUS) {
-        NonlinearOptimizer sameLambda(newValues_(newValues));
+        NonlinearOptimizer sameLambda(newValuesSolver_(newValues, newSolver));
         if(sameLambda.error_ <= error_)
           return sameLambda;
       }
@@ -223,7 +235,7 @@ namespace gtsam {
 
     // TODO: can we avoid copying the values ?
     if(lambdaMode >= Parameters::BOUNDED && lambda_ >= 1.0e5) {
-      return NonlinearOptimizer(newValues_(newValues));
+      return NonlinearOptimizer(newValuesSolver_(newValues, newSolver));
     } else {
       NonlinearOptimizer cautious(newLambda_(lambda_ * factor));
       return cautious.try_lambda(linear, verbosity, factor, lambdaMode);
@@ -37,7 +37,7 @@ namespace gtsam {
 
   /**
    * The class NonlinearOptimizer encapsulates an optimization state.
-   * Typically it is instantiated with a NonlinearFactorGraph and an initial config
+   * Typically it is instantiated with a NonlinearFactorGraph and an initial values
    * and then one of the optimization routines is called. These recursively iterate
    * until convergence. All methods are functional and return a new state.
    *
@@ -67,10 +67,11 @@ namespace gtsam {
   class NonlinearOptimizer {
   public:
 
-    // For performance reasons in recursion, we store configs in a shared_ptr
+    // For performance reasons in recursion, we store values in a shared_ptr
     typedef boost::shared_ptr<const T> shared_values;
    typedef boost::shared_ptr<const G> shared_graph;
     typedef boost::shared_ptr<const Ordering> shared_ordering;
+    typedef boost::shared_ptr<const GS> shared_solver;
     typedef NonlinearOptimizationParameters Parameters;
 
   private:
@@ -87,9 +88,11 @@ namespace gtsam {
     const shared_values values_;
     const double error_;
 
     // the variable ordering
     const shared_ordering ordering_;
 
     // the linear system solver
-    //const shared_solver solver_;
+    const shared_solver solver_;
 
     // keep current lambda for use within LM only
     // TODO: red flag, should we have an LM class ?
@@ -105,26 +108,26 @@ namespace gtsam {
     /**
      * Constructor that does not do any computation
      */
-    NonlinearOptimizer(shared_graph graph, shared_values config, shared_ordering ordering,
-        const double error, const double lambda, shared_dimensions dimensions): graph_(graph), values_(config),
-        error_(error), ordering_(ordering), lambda_(lambda), dimensions_(dimensions) {}
+    NonlinearOptimizer(shared_graph graph, shared_values values, const double error, shared_ordering ordering,
+        shared_solver solver, const double lambda, shared_dimensions dimensions): graph_(graph), values_(values),
+        error_(error), ordering_(ordering), solver_(solver), lambda_(lambda), dimensions_(dimensions) {}
 
     /** Create a new NonlinearOptimizer with a different lambda */
     This newLambda_(double newLambda) const {
-      return NonlinearOptimizer(graph_, values_, ordering_, error_, newLambda, dimensions_); }
+      return NonlinearOptimizer(graph_, values_, error_, ordering_, solver_, newLambda, dimensions_); }
 
-    This newValues_(shared_values newValues) const {
-      return NonlinearOptimizer(graph_, newValues, ordering_, graph_->error(*newValues), lambda_, dimensions_); }
+    This newValuesSolver_(shared_values newValues, shared_solver newSolver) const {
+      return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, lambda_, dimensions_); }
 
-    This newValuesNewLambda_(shared_values newValues, double newLambda) const {
-      return NonlinearOptimizer(graph_, newValues, ordering_, graph_->error(*newValues), newLambda, dimensions_); }
+    This newValuesSolverLambda_(shared_values newValues, shared_solver newSolver, double newLambda) const {
+      return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), ordering_, newSolver, newLambda, dimensions_); }
 
   public:
 
     /**
     * Constructor that evaluates new error
     */
-    NonlinearOptimizer(shared_graph graph, shared_values config, shared_ordering ordering,
+    NonlinearOptimizer(shared_graph graph, shared_values values, shared_ordering ordering,
        const double lambda = 1e-5);
 
     /**
@@ -132,7 +135,7 @@ namespace gtsam {
      */
     NonlinearOptimizer(const NonlinearOptimizer<G, T, L, GS> &optimizer) :
       graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
-      ordering_(optimizer.ordering_), lambda_(optimizer.lambda_), dimensions_(optimizer.dimensions_) {}
+      ordering_(optimizer.ordering_), solver_(optimizer.solver_), lambda_(optimizer.lambda_), dimensions_(optimizer.dimensions_) {}
 
     /**
      * Return current error
@@ -208,21 +211,21 @@ namespace gtsam {
     /**
      * Static interface to LM optimization using default ordering and thresholds
      * @param graph Nonlinear factor graph to optimize
-     * @param config Initial config
+     * @param values Initial values
      * @param verbosity Integer specifying how much output to provide
      * @return an optimized values structure
      */
-    static shared_values optimizeLM(shared_graph graph, shared_values config,
+    static shared_values optimizeLM(shared_graph graph, shared_values values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
 
       // Use a variable ordering from COLAMD
-      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
+      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
 
       double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
 
       // initial optimization state is the same in both cases tested
-      GS solver(*graph->linearize(*config, *ordering));
-      NonlinearOptimizer optimizer(graph, config, ordering);
+      GS solver(*graph->linearize(*values, *ordering));
+      NonlinearOptimizer optimizer(graph, values, ordering);
 
       // Levenberg-Marquardt
       NonlinearOptimizer result = optimizer.levenbergMarquardt(relativeThreshold,
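Note: for callers of this static interface only the config-to-values renaming is visible; the solver caching happens inside iterate() and try_lambda(). A hedged usage sketch, where Optimizer stands for a fully instantiated NonlinearOptimizer typedef (hypothetical name):

    // One-call Levenberg-Marquardt with a COLAMD ordering chosen internally;
    // returns shared_values, i.e. boost::shared_ptr<const T>.
    Optimizer::shared_values result =
        Optimizer::optimizeLM(graph, initialValues, Parameters::SILENT);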
@@ -233,27 +236,27 @@ namespace gtsam {
     /**
      * Static interface to LM optimization (no shared_ptr arguments) - see above
      */
-    inline static shared_values optimizeLM(const G& graph, const T& config,
+    inline static shared_values optimizeLM(const G& graph, const T& values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
       return optimizeLM(boost::make_shared<const G>(graph),
-          boost::make_shared<const T>(config), verbosity);
+          boost::make_shared<const T>(values), verbosity);
     }
 
     /**
      * Static interface to GN optimization using default ordering and thresholds
      * @param graph Nonlinear factor graph to optimize
-     * @param config Initial config
+     * @param values Initial values
      * @param verbosity Integer specifying how much output to provide
      * @return an optimized values structure
      */
-    static shared_values optimizeGN(shared_graph graph, shared_values config,
+    static shared_values optimizeGN(shared_graph graph, shared_values values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
-      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
+      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
       double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
 
       // initial optimization state is the same in both cases tested
-      GS solver(*graph->linearize(*config, *ordering));
-      NonlinearOptimizer optimizer(graph, config, ordering);
+      GS solver(*graph->linearize(*values, *ordering));
+      NonlinearOptimizer optimizer(graph, values, ordering);
 
       // Gauss-Newton
       NonlinearOptimizer result = optimizer.gaussNewton(relativeThreshold,
@@ -264,10 +267,10 @@ namespace gtsam {
     /**
      * Static interface to GN optimization (no shared_ptr arguments) - see above
      */
-    inline static shared_values optimizeGN(const G& graph, const T& config,
+    inline static shared_values optimizeGN(const G& graph, const T& values,
         Parameters::verbosityLevel verbosity = Parameters::SILENT) {
       return optimizeGN(boost::make_shared<const G>(graph),
-          boost::make_shared<const T>(config), verbosity);
+          boost::make_shared<const T>(values), verbosity);
     }
 
   };