BIG: replaced optimize() in NonlinearFactorGraph with a specialized NonlinearOptimizer object. This does away with the artificial ErrorVectorConfig and the like, as NonlinearOptimizer is templated and can use "exmap", the exponential map defined for any differentiable manifold.

release/4.3a0
Frank Dellaert 2009-09-09 04:43:04 +00:00
parent ff12101945
commit ead3d03866
15 changed files with 751 additions and 544 deletions
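In practice, the change replaces imperative calls like fg.optimize(config, ...) with a functional optimizer object that carries graph, ordering, config, and error as immutable state. A minimal usage sketch, pieced together from cpp/testNonlinearOptimizer.cpp below (graph and config construction come from smallExample; the thresholds are illustrative):

    #include "NonlinearOptimizer.cpp" // class template: include the .cpp, see NonlinearOptimizer.h
    typedef NonlinearOptimizer<NonlinearFactorGraph, FGConfig> Optimizer;

    NonlinearFactorGraph fg = createNonlinearFactorGraph();
    Optimizer::shared_config initial = sharedNoisyConfig(); // boost::shared_ptr<const FGConfig>
    Ordering ord;
    ord.push_back("x2"); ord.push_back("l1"); ord.push_back("x1");

    // each call returns a new optimizer state; nothing is mutated in place
    Optimizer optimizer(fg, ord, initial);
    Optimizer result = optimizer.levenbergMarquardt(1e-5, 1e-5, Optimizer::SILENT);
    result.config().print("solution");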

.cproject

@@ -366,6 +366,21 @@
 			<useDefaultCommand>true</useDefaultCommand>
 			<runAllBuilders>true</runAllBuilders>
 			</target>
+			<target name="testNonlinearOptimizer.run" path="cpp" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
+			<buildCommand>make</buildCommand>
+			<buildTarget>testNonlinearOptimizer.run</buildTarget>
+			<stopOnError>true</stopOnError>
+			<useDefaultCommand>true</useDefaultCommand>
+			<runAllBuilders>true</runAllBuilders>
+			</target>
+			<target name="testLinearFactor.run" path="cpp" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
+			<buildCommand>make</buildCommand>
+			<buildArguments></buildArguments>
+			<buildTarget>testLinearFactor.run</buildTarget>
+			<stopOnError>true</stopOnError>
+			<useDefaultCommand>true</useDefaultCommand>
+			<runAllBuilders>true</runAllBuilders>
+			</target>
 			<target name="install" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
 			<buildCommand>make</buildCommand>
 			<buildTarget>install</buildTarget>

cpp/FGConfig.cpp

@@ -9,13 +9,11 @@
 #include <boost/foreach.hpp>
 #include <boost/tuple/tuple.hpp>
 #include "FGConfig.h"
-#include "Value.h"
-using namespace std;

 // trick from some reading group
 #define FOREACH_PAIR( KEY, VAL, COL) BOOST_FOREACH (boost::tie(KEY,VAL),COL)

+using namespace std;
 using namespace gtsam;

 /* ************************************************************************* */
@@ -29,23 +27,17 @@ void check_size(const string& key, const Vector & vj, const Vector & dj) {
 }

 /* ************************************************************************* */
-void FGConfig::operator+=(const FGConfig & delta)
+FGConfig FGConfig::exmap(const FGConfig & delta) const
 {
-  for (iterator it = values.begin(); it!=values.end(); it++) {
+  FGConfig newConfig;
+  for (const_iterator it = values.begin(); it!=values.end(); it++) {
     string j = it->first;
-    Vector &vj = it->second;
+    const Vector &vj = it->second;
     const Vector& dj = delta[j];
     check_size(j,vj,dj);
-    vj += dj;
+    newConfig.insert(j, vj + dj);
   }
-}
-
-/* ************************************************************************* */
-FGConfig FGConfig::operator+(const FGConfig & delta) const
-{
-  FGConfig result = *this;
-  result += delta;
-  return result;
+  return newConfig;
 }

 /* ************************************************************************* */
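The new exmap above is the vector-space case of the exponential map: per-key addition that returns a fresh config instead of mutating in place. A standalone sketch of just that semantics, using stand-in STL types rather than gtsam's (illustration only):

    #include <map>
    #include <string>
    #include <vector>

    typedef std::map<std::string, std::vector<double> > Config;

    // exmap for a vector-valued config: newConfig[j] = config[j] + delta[j]
    Config exmap(const Config& config, const Config& delta) {
      Config newConfig;
      for (Config::const_iterator it = config.begin(); it != config.end(); ++it) {
        Config::const_iterator dit = delta.find(it->first); // assumes keys match
        std::vector<double> sum(it->second);
        for (size_t i = 0; i < sum.size(); ++i)
          sum[i] += dit->second[i]; // assumes sizes match, cf. check_size
        newConfig[it->first] = sum;
      }
      return newConfig;
    }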

cpp/FGConfig.h

@@ -12,12 +12,13 @@
 #include <map>
 #include <boost/serialization/map.hpp>
-#include "Value.h"
+#include "Testable.h"
+#include "Vector.h"

 namespace gtsam {

   /** Factor Graph Configuration */
-  class FGConfig {
+  class FGConfig : public Testable<FGConfig> {

   protected:
     /** Map from string indices to values */
@@ -27,10 +28,10 @@ namespace gtsam {
     typedef std::map<std::string, Vector>::iterator iterator;
     typedef std::map<std::string, Vector>::const_iterator const_iterator;

-    FGConfig() {};
-    FGConfig(const FGConfig& cfg_in) : values(cfg_in.values){};
-    virtual ~FGConfig() {};
+    FGConfig():Testable<FGConfig>() {}
+    FGConfig(const FGConfig& cfg_in) : Testable<FGConfig>(), values(cfg_in.values) {}
+    virtual ~FGConfig() {}

     /** return all the nodes in the graph **/
     std::vector<std::string> get_names() const {
@@ -47,10 +48,10 @@ namespace gtsam {
     }

     /**
-     * add a delta config, should apply exponential map more generally
+     * Add a delta config, needed for use in NonlinearOptimizer
+     * For FGConfig, this is just addition.
      */
-    virtual void operator+=(const FGConfig & delta);
-    virtual FGConfig operator+(const FGConfig & delta) const;
+    FGConfig exmap(const FGConfig & delta) const;

     const_iterator begin() const {return values.begin();}
     const_iterator end() const {return values.end();}
@@ -77,7 +78,7 @@ namespace gtsam {
     void print(const std::string& name = "") const;

     /** equals, for unit testing */
-    bool equals(const FGConfig& expected, double tol=1e-6) const;
+    bool equals(const FGConfig& expected, double tol=1e-9) const;

     void clear() {values.clear();}

cpp/Makefile.am

@@ -80,14 +80,18 @@ timeLinearFactor: CXXFLAGS += -I /opt/local/include
 timeLinearFactor: LDFLAGS += -L.libs -lgtsam

 # graphs
-sources += ConstrainedChordalBayesNet.cpp ChordalBayesNet.cpp FactorGraph.cpp LinearFactorGraph.cpp NonlinearFactorGraph.cpp
+sources += ConstrainedChordalBayesNet.cpp ChordalBayesNet.cpp FactorGraph.cpp
+sources += LinearFactorGraph.cpp NonlinearFactorGraph.cpp
 sources += ConstrainedNonlinearFactorGraph.cpp ConstrainedLinearFactorGraph.cpp
-check_PROGRAMS += testChordalBayesNet testConstrainedChordalBayesNet testFactorgraph testLinearFactorGraph testNonlinearFactorGraph testConstrainedNonlinearFactorGraph testConstrainedLinearFactorGraph
+check_PROGRAMS += testChordalBayesNet testConstrainedChordalBayesNet testFactorgraph
+check_PROGRAMS += testLinearFactorGraph testNonlinearFactorGraph testNonlinearOptimizer
+check_PROGRAMS += testConstrainedNonlinearFactorGraph testConstrainedLinearFactorGraph
 testChordalBayesNet_SOURCES = $(example) testChordalBayesNet.cpp
 testConstrainedChordalBayesNet_SOURCES = $(example) testConstrainedChordalBayesNet.cpp
 testFactorgraph_SOURCES = testFactorgraph.cpp
 testLinearFactorGraph_SOURCES = $(example) testLinearFactorGraph.cpp
 testNonlinearFactorGraph_SOURCES = $(example) testNonlinearFactorGraph.cpp
+testNonlinearOptimizer_SOURCES = $(example) testNonlinearOptimizer.cpp
 testConstrainedNonlinearFactorGraph_SOURCES = $(example) testConstrainedNonlinearFactorGraph.cpp
 testConstrainedLinearFactorGraph_SOURCES = $(example) testConstrainedLinearFactorGraph.cpp
@@ -95,6 +99,7 @@ testChordalBayesNet_LDADD = libgtsam.la
 testFactorgraph_LDADD = libgtsam.la
 testLinearFactorGraph_LDADD = libgtsam.la
 testNonlinearFactorGraph_LDADD = libgtsam.la
+testNonlinearOptimizer_LDADD = libgtsam.la
 testConstrainedNonlinearFactorGraph_LDADD = libgtsam.la
 testConstrainedLinearFactorGraph_LDADD = libgtsam.la
 testConstrainedChordalBayesNet_LDADD = libgtsam.la
@@ -140,7 +145,10 @@ testVSLAMFactor_LDADD = libgtsam.la
 # The header files will be installed in ~/include/gtsam
-headers = gtsam.h Value.h Factor.h LinearFactorSet.h Point2Prior.h Simulated2DOdometry.h Simulated2DMeasurement.h smallExample.h $(sources:.cpp=.h)
+headers = gtsam.h Value.h Testable.h Factor.h LinearFactorSet.h Point2Prior.h
+headers += Simulated2DOdometry.h Simulated2DMeasurement.h smallExample.h
+headers += NonlinearOptimizer.h NonlinearOptimizer.cpp # template !
+headers += $(sources:.cpp=.h)

 # create both dynamic and static libraries
 AM_CXXFLAGS = -I$(boost) -fPIC
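Note the unusual install line marked "# template !": NonlinearOptimizer is a class template, so its member definitions must be visible at the point of instantiation. NonlinearOptimizer.cpp is therefore shipped like a header, and client code includes the .cpp directly, e.g.

    #include "NonlinearOptimizer.cpp" // not the usual .h

as cpp/testNonlinearOptimizer.cpp does below (per the header comment, the separate-compilation trick from http://www.ddj.com/cpp/184403420 did not work).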

cpp/NonlinearFactorGraph.cpp

@@ -16,240 +16,74 @@
 using namespace std;

 namespace gtsam {

 /* ************************************************************************* */
-LinearFactorGraph NonlinearFactorGraph::linearize(const FGConfig& config) const
-{
+LinearFactorGraph NonlinearFactorGraph::linearize(const FGConfig& config) const {
   // TODO speed up the function either by returning a pointer or by
   // returning the linearisation as a second argument and returning
   // the reference

   // create an empty linear FG
   LinearFactorGraph linearFG;

   // linearize all factors
-  for(const_iterator factor=factors_.begin(); factor<factors_.end(); factor++){
+  for (const_iterator factor = factors_.begin(); factor < factors_.end(); factor++) {
     LinearFactor::shared_ptr lf = (*factor)->linearize(config);
     linearFG.push_back(lf);
   }
   return linearFG;
 }

 /* ************************************************************************* */
-double calculate_error (const NonlinearFactorGraph& fg, const FGConfig& config, int verbosity) {
+double calculate_error(const NonlinearFactorGraph& fg,
+    const FGConfig& config, int verbosity) {
   double newError = fg.error(config);
-  if (verbosity>=1) cout << "error: " << newError << endl;
+  if (verbosity >= 1)
+    cout << "error: " << newError << endl;
   return newError;
 }

 /* ************************************************************************* */
-bool check_convergence (double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    double currentError, double newError,
-    int verbosity)
-{
+bool check_convergence(double relativeErrorTreshold,
+    double absoluteErrorTreshold, double currentError, double newError,
+    int verbosity) {
   // check if diverges
   double absoluteDecrease = currentError - newError;
-  if (verbosity>=2) cout << "absoluteDecrease: " << absoluteDecrease << endl;
-  if (absoluteDecrease<0)
-    throw overflow_error("NonlinearFactorGraph::optimize: error increased, diverges.");
+  if (verbosity >= 2)
+    cout << "absoluteDecrease: " << absoluteDecrease << endl;
+  if (absoluteDecrease < 0)
+    throw overflow_error(
+        "NonlinearFactorGraph::optimize: error increased, diverges.");

   // calculate relative error decrease and update currentError
-  double relativeDecrease = absoluteDecrease/currentError;
-  if (verbosity>=2) cout << "relativeDecrease: " << relativeDecrease << endl;
-  bool converged =
-    (relativeDecrease < relativeErrorTreshold) ||
-    (absoluteDecrease < absoluteErrorTreshold);
-  if (verbosity>=1 && converged) cout << "converged" << endl;
+  double relativeDecrease = absoluteDecrease / currentError;
+  if (verbosity >= 2)
+    cout << "relativeDecrease: " << relativeDecrease << endl;
+  bool converged = (relativeDecrease < relativeErrorTreshold)
+      || (absoluteDecrease < absoluteErrorTreshold);
+  if (verbosity >= 1 && converged)
+    cout << "converged" << endl;
   return converged;
 }

 /* ************************************************************************* */
-// linearize and solve, return delta config (TODO: return updated)
-/* ************************************************************************* */
-FGConfig NonlinearFactorGraph::iterate
-  (const FGConfig& config, const Ordering& ordering) const
-{
-  LinearFactorGraph linear = linearize(config); // linearize all factors_
-  return linear.optimize(ordering);
-}
-
-/* ************************************************************************* */
-// One iteration of optimize, imperative :-(
-/* ************************************************************************* */
-double NonlinearFactorGraph::iterate
-  (FGConfig& config, const Ordering& ordering, int verbosity) const
-{
-  FGConfig delta = iterate(config, ordering); // linearize and solve
-  if (verbosity>=4) delta.print("delta");     // maybe show output
-  config += delta;                            // update config
-  if (verbosity>=3) config.print("config");   // maybe show output
-  return calculate_error(*this,config,verbosity);
-}
-
-/* ************************************************************************* */
-Ordering NonlinearFactorGraph::getOrdering(FGConfig& config) const
-{
+Ordering NonlinearFactorGraph::getOrdering(const FGConfig& config) const {
   // TODO: FD: Whoa! This is crazy !!!!! re-linearizing just to get ordering ?
   LinearFactorGraph lfg = linearize(config);
   return lfg.getOrdering();
 }

 /* ************************************************************************* */
-void NonlinearFactorGraph::optimize(FGConfig& config,
-    const Ordering& ordering,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity) const
-{
-  bool converged = false;
-  double currentError = calculate_error(*this,config, verbosity);
-
-  // while not converged
-  while(!converged) {
-    double newError = iterate(config, ordering, verbosity); // linearize, solve, update
-    converged = gtsam::check_convergence(relativeErrorTreshold, absoluteErrorTreshold,
-        currentError, newError, verbosity);
-    currentError = newError;
-  }
-}
-
-/* ************************************************************************* */
 bool NonlinearFactorGraph::check_convergence(const FGConfig& config1,
-    const FGConfig& config2,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity) const
-{
+    const FGConfig& config2, double relativeErrorTreshold,
+    double absoluteErrorTreshold, int verbosity) const {
   double currentError = calculate_error(*this, config1, verbosity);
   double newError = calculate_error(*this, config2, verbosity);
-  return gtsam::check_convergence(relativeErrorTreshold, absoluteErrorTreshold,
-      currentError, newError, verbosity);
+  return gtsam::check_convergence(relativeErrorTreshold,
+      absoluteErrorTreshold, currentError, newError, verbosity);
 }

-/* ************************************************************************* */
-// One attempt at a tempered Gauss-Newton step with given lambda
-/* ************************************************************************* */
-pair<FGConfig,double> NonlinearFactorGraph::try_lambda
-  (const FGConfig& config, const LinearFactorGraph& linear,
-   double lambda, const Ordering& ordering, int verbosity) const
-{
-  if (verbosity>=1)
-    cout << "trying lambda = " << lambda << endl;
-  LinearFactorGraph damped =
-      linear.add_priors(sqrt(lambda)); // add prior-factors
-  if (verbosity>=7) damped.print("damped");
-  FGConfig delta = damped.optimize(ordering); // solve
-  if (verbosity>=5) delta.print("delta");
-  FGConfig newConfig = config + delta; // update config
-  if (verbosity>=5) newConfig.print("config");
-  double newError =                                  // calculate...
-      calculate_error(*this,newConfig,verbosity>=4); // ...new error
-  return make_pair(newConfig,newError); // return config and error
-}
-
-/* ************************************************************************* */
-// One iteration of Levenberg Marquardt, imperative :-(
-/* ************************************************************************* */
-double NonlinearFactorGraph::iterateLM
-  (FGConfig& config, double currentError, double& lambda, double lambdaFactor,
-   const Ordering& ordering, int verbosity) const
-{
-  FGConfig newConfig;
-  double newError;
-  LinearFactorGraph linear = linearize(config); // linearize all factors once
-  if (verbosity>=6) linear.print("linear");
-
-  // try lambda steps with successively larger lambda until we achieve descent
-  boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  while (newError > currentError) {
-    lambda = lambda * lambdaFactor; // be more cautious
-    boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  }
-
-  // next time, let's be more adventerous
-  lambda = lambda / lambdaFactor;
-
-  // return result of this iteration
-  config = newConfig;
-  if (verbosity>=4) config.print("config"); // maybe show output
-  if (verbosity>=1) cout << "error: " << newError << endl;
-  return newError;
-}
-
-/* ************************************************************************* */
-void NonlinearFactorGraph::optimizeLM(FGConfig& config,
-    const Ordering& ordering,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity,
-    double lambda0,
-    double lambdaFactor) const
-{
-  double lambda = lambda0;
-  bool converged = false;
-  double currentError = calculate_error(*this,config, verbosity);
-  if (verbosity>=4) config.print("config"); // maybe show output
-
-  // while not converged
-  while(!converged) {
-    // linearize, solve, update
-    double newError = iterateLM(config, currentError, lambda, lambdaFactor, ordering, verbosity);
-    converged = gtsam::check_convergence(relativeErrorTreshold, absoluteErrorTreshold,
-        currentError, newError, verbosity);
-    currentError = newError;
-  }
-}
-
-/* ************************************************************************* */
-pair<LinearFactorGraph, FGConfig> NonlinearFactorGraph::OneIterationLM( FGConfig& config,
-    const Ordering& ordering,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity,
-    double lambda0,
-    double lambdaFactor) const {
-  double lambda = lambda0;
-  bool converged = false;
-  double currentError = calculate_error(*this,config, verbosity);
-  if (verbosity>=4) config.print("config"); // maybe show output
-
-  FGConfig newConfig;
-  double newError;
-  LinearFactorGraph linear = linearize(config); // linearize all factors once
-  if (verbosity>=6) linear.print("linear");
-
-  // try lambda steps with successively larger lambda until we achieve descent
-  boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  while (newError > currentError) {
-    lambda = lambda * lambdaFactor; // be more cautious
-    boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  }
-
-  // next time, let's be more adventerous
-  lambda = lambda / lambdaFactor;
-
-  // return result of this iteration
-  config = newConfig;
-  if (verbosity>=4) config.print("config"); // maybe show output
-  if (verbosity>=1) cout << "error: " << newError << endl;
-
-  pair<LinearFactorGraph, FGConfig> p(linear, newConfig);
-  return p;
-}
+// linearize, solve, update
+//double newError = iterateLM(config, currentError, lambda, lambdaFactor, ordering, verbosity);

 /* ************************************************************************* */
 } // namespace gtsam

cpp/NonlinearFactorGraph.h

@@ -21,24 +21,13 @@ namespace gtsam {
 typedef FactorGraph<NonlinearFactor> BaseFactorGraph;

-/** Factor Graph Constsiting of non-linear factors */
+/** Factor Graph consisting of non-linear factors */
 class NonlinearFactorGraph : public BaseFactorGraph
 {
-public: // internal, exposed for testing only, doc in .cpp file
-
-  FGConfig iterate(const FGConfig& config, const Ordering& ordering) const;
-  double iterate(FGConfig& config, const Ordering& ordering, int verbosity) const;
-  std::pair<FGConfig,double> try_lambda
-    (const FGConfig& config, const LinearFactorGraph& linear,
-     double lambda, const Ordering& ordering, int verbosity) const;
-  double iterateLM(FGConfig& config, double currentError,
-      double& lambda, double lambdaFactor,
-      const Ordering& ordering, int verbosity) const;
-  Ordering getOrdering(FGConfig& config) const;
+public:
+  // internal, exposed for testing only, doc in .cpp file
+  Ordering getOrdering(const FGConfig& config) const;

 public: // these you will probably want to use

@@ -46,20 +35,6 @@ public: // these you will probably want to use
    */
   LinearFactorGraph linearize(const FGConfig& config) const;

-  /**
-   * Optimize a solution for a non linear factor graph
-   * Changes config in place
-   * @param config       Reference to current configuration
-   * @param ordering     Ordering to optimize with
-   * @param relativeErrorTreshold
-   * @param absoluteErrorTreshold
-   * @param verbosity    Integer specifying how much output to provide
-   */
-  void optimize(FGConfig& config,
-      const Ordering& ordering,
-      double relativeErrorTreshold, double absoluteErrorTreshold,
-      int verbosity = 0) const;
-
   /**
    * Given two configs, check whether the error of config2 is
    * different enough from the error for config1, or whether config1

@@ -76,40 +51,6 @@ public: // these you will probably want to use
       double relativeErrorTreshold, double absoluteErrorTreshold,
       int verbosity = 0) const;

-  /**
-   * Optimize using Levenberg-Marquardt. Really Levenberg's
-   * algorithm at this moment, as we just add I*\lambda to Hessian
-   * H'H. The probabilistic explanation is very simple: every
-   * variable gets an extra Gaussian prior that biases staying at
-   * current value, with variance 1/lambda. This is done very easily
-   * (but perhaps wastefully) by adding a prior factor for each of
-   * the variables, after linearization.
-   *
-   * Changes config in place :-(
-   *
-   * @param config       Reference to current configuration
-   * @param ordering     Ordering to optimize with
-   * @param relativeErrorTreshold
-   * @param absoluteErrorTreshold
-   * @param verbosity    Integer specifying how much output to provide
-   * @param lambda       Reference to current lambda
-   * @param lambdaFactor Factor by which to decrease/increase lambda
-   */
-  void optimizeLM(FGConfig& config,
-      const Ordering& ordering,
-      double relativeErrorTreshold, double absoluteErrorTreshold,
-      int verbosity = 0,
-      double lambda0=1e-5, double lambdaFactor=10) const;
-
-  std::pair<LinearFactorGraph, FGConfig> OneIterationLM( FGConfig& config, const Ordering& ordering,
-      double relativeErrorTreshold,
-      double absoluteErrorTreshold,
-      int verbosity,
-      double lambda0,
-      double lambdaFactor) const ;
-
 private:
   /** Serialization function */

cpp/NonlinearOptimizer.cpp Normal file

@@ -0,0 +1,175 @@
/**
* NonlinearOptimizer.cpp
* @brief: Encapsulates nonlinear optimization state
* @Author: Frank Dellaert
* Created on: Sep 7, 2009
*/
#include <iostream>
#include <boost/tuple/tuple.hpp>
#include "NonlinearOptimizer.h"
using namespace std;
namespace gtsam {
/* ************************************************************************* */
// Constructors
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C>::NonlinearOptimizer(const G& graph,
const Ordering& ordering, shared_config config, double lambda) :
graph_(&graph), ordering_(&ordering), config_(config), error_(graph.error(
*config)), lambda_(lambda) {
}
/* ************************************************************************* */
// linearize and optimize
/* ************************************************************************* */
template<class G, class C>
FGConfig NonlinearOptimizer<G, C>::delta() const {
LinearFactorGraph linear = graph_->linearize(*config_);
return linear.optimize(*ordering_);
}
/* ************************************************************************* */
// One iteration of Gauss Newton
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::iterate(
verbosityLevel verbosity) const {
// linearize and optimize
FGConfig delta = this->delta();
// maybe show output
if (verbosity >= DELTA)
delta.print("delta");
// take old config and update it
shared_config newConfig(new C(config_->exmap(delta)));
// maybe show output
if (verbosity >= CONFIG)
newConfig->print("newConfig");
return NonlinearOptimizer(*graph_, *ordering_, newConfig);
}
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::gaussNewton(
double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations) const {
// linearize, solve, update
NonlinearOptimizer next = iterate(verbosity);
// check convergence
bool converged = gtsam::check_convergence(relativeThreshold,
absoluteThreshold, error_, next.error_, verbosity);
// return converged state or iterate
if (converged)
return next;
else
return next.gaussNewton(relativeThreshold, absoluteThreshold, verbosity);
}
/* ************************************************************************* */
// Recursively try to do tempered Gauss-Newton steps until we succeed.
// Form damped system with given lambda, and return a new, more optimistic
// optimizer if error decreased or recurse with a larger lambda if not.
// TODO: in theory we can't infinitely recurse, but maybe we should put a max.
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::try_lambda(
const LinearFactorGraph& linear, verbosityLevel verbosity, double factor) const {
if (verbosity >= TRYLAMBDA)
cout << "trying lambda = " << lambda_ << endl;
// add prior-factors
LinearFactorGraph damped = linear.add_priors(sqrt(lambda_));
if (verbosity >= DAMPED)
damped.print("damped");
// solve
FGConfig delta = damped.optimize(*ordering_);
if (verbosity >= TRYDELTA)
delta.print("delta");
// update config
shared_config newConfig(new C(config_->exmap(delta)));
if (verbosity >= TRYCONFIG)
newConfig->print("config");
// create new optimization state with more adventurous lambda
NonlinearOptimizer next(*graph_, *ordering_, newConfig, lambda_ / factor);
// if error decreased, return the new state
if (next.error_ <= error_)
return next;
else {
// TODO: can we avoid copying the config ?
NonlinearOptimizer cautious(*graph_, *ordering_, config_, lambda_ * factor);
return cautious.try_lambda(linear, verbosity, factor);
}
}
/* ************************************************************************* */
// One iteration of Levenberg Marquardt
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::iterateLM(
verbosityLevel verbosity, double lambdaFactor) const {
// maybe show output
if (verbosity >= CONFIG)
config_->print("config");
if (verbosity >= ERROR)
cout << "error: " << error_ << endl;
if (verbosity >= LAMBDA)
cout << "lambda = " << lambda_ << endl;
// linearize all factors once
LinearFactorGraph linear = graph_->linearize(*config_);
if (verbosity >= LINEAR)
linear.print("linear");
// try lambda steps with successively larger lambda until we achieve descent
return try_lambda(linear, verbosity, lambdaFactor);
}
/* ************************************************************************* */
template<class G, class C>
NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::levenbergMarquardt(
double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations, double lambdaFactor) const {
// do one iteration of LM
NonlinearOptimizer next = iterateLM(verbosity, lambdaFactor);
// check convergence
// TODO: move convergence checks here and incorporate in verbosity levels
// TODO: build into iterations somehow as an instance variable
bool converged = gtsam::check_convergence(relativeThreshold,
absoluteThreshold, error_, next.error_, verbosity);
// return converged state or iterate
if (converged || maxIterations <= 1) {
// maybe show output
if (verbosity >= CONFIG)
next.config_->print("final config");
if (verbosity >= ERROR)
cout << "final error: " << next.error_ << endl;
if (verbosity >= LAMBDA)
cout << "final lambda = " << next.lambda_ << endl;
return next;
} else
return next.levenbergMarquardt(relativeThreshold, absoluteThreshold,
verbosity, lambdaFactor);
}
/* ************************************************************************* */
}

cpp/NonlinearOptimizer.h Normal file

@@ -0,0 +1,147 @@
/**
* NonlinearOptimizer.h
* @brief: Encapsulates nonlinear optimization state
* @Author: Frank Dellaert
* Created on: Sep 7, 2009
*/
#ifndef NONLINEAROPTIMIZER_H_
#define NONLINEAROPTIMIZER_H_
#include <boost/shared_ptr.hpp>
#include "NonlinearFactorGraph.h"
#include "FGConfig.h"
namespace gtsam {
/**
* The class NonlinearOptimizer encapsulates an optimization state.
* Typically it is instantiated with a NonlinearFactorGraph and an initial config
* and then one of the optimization routines is called. These recursively iterate
* until convergence. All methods are functional and return a new state.
*
* The class is parameterized by the Config class type, in order to optimize
* over non-vector configurations as well.
* To use in code, include <gtsam/NonlinearOptimizer.cpp> in your cpp file
* (the trick in http://www.ddj.com/cpp/184403420 did not work).
*/
template<class FactorGraph, class Config>
class NonlinearOptimizer {
public:
// For performance reasons in recursion, we store configs in a shared_ptr
typedef boost::shared_ptr<const Config> shared_config;
enum verbosityLevel {
SILENT,
ERROR,
LAMBDA,
CONFIG,
DELTA,
LINEAR,
TRYLAMBDA,
TRYCONFIG,
TRYDELTA,
DAMPED
};
private:
// keep a reference to const versions of the graph and ordering
// These normally do not change
const FactorGraph* graph_;
const Ordering* ordering_;
// keep a configuration and its error
// These typically change once per iteration (in a functional way)
shared_config config_;
double error_;
// keep current lambda for use within LM only
// TODO: red flag, should we have an LM class ?
double lambda_;
// Recursively try to do tempered Gauss-Newton steps until we succeed
NonlinearOptimizer try_lambda(const LinearFactorGraph& linear,
verbosityLevel verbosity, double factor) const;
public:
/**
* Constructor
*/
NonlinearOptimizer(const FactorGraph& graph, const Ordering& ordering,
shared_config config, double lambda = 1e-5);
/**
* Return current config
*/
const Config& config() const {
return *config_;
}
/**
* Return current error
*/
double error() const {
return error_;
}
/**
* Return current lambda
*/
double lambda() const {
return lambda_;
}
/**
* linearize and optimize
* This returns an FGConfig, i.e., vectors in the tangent space of Config
*/
FGConfig delta() const;
/**
* Do one Gauss-Newton iteration and return next state
*/
NonlinearOptimizer iterate(verbosityLevel verbosity = SILENT) const;
/**
* Optimize a solution for a non linear factor graph
* @param relativeTreshold
* @param absoluteTreshold
* @param verbosity Integer specifying how much output to provide
*/
NonlinearOptimizer
gaussNewton(double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity = SILENT, int maxIterations = 100) const;
/**
* One iteration of Levenberg Marquardt
*/
NonlinearOptimizer iterateLM(verbosityLevel verbosity = SILENT,
double lambdaFactor = 10) const;
/**
* Optimize using Levenberg-Marquardt. Really Levenberg's
* algorithm at this moment, as we just add I*\lambda to Hessian
* H'H. The probabilistic explanation is very simple: every
* variable gets an extra Gaussian prior that biases staying at
* current value, with variance 1/lambda. This is done very easily
* (but perhaps wastefully) by adding a prior factor for each of
* the variables, after linearization.
*
* @param relativeThreshold
* @param absoluteThreshold
* @param verbosity Integer specifying how much output to provide
* @param lambdaFactor Factor by which to decrease/increase lambda
*/
NonlinearOptimizer
levenbergMarquardt(double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity = SILENT, int maxIterations = 100,
double lambdaFactor = 10) const;
};
}
#endif /* NONLINEAROPTIMIZER_H_ */
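The probabilistic reading in the levenbergMarquardt comment can also be checked directly on the damped least-squares objective. With the linearized system A d ≈ b, the priors added by add_priors(sqrt(lambda)) turn the problem into

    min_d ||A d - b||^2 + lambda ||d||^2  =  min_d || [A ; sqrt(lambda)*I] d - [b ; 0] ||^2

whose normal equations are (A'A + lambda*I) d = A'b: every variable gets a zero-mean Gaussian prior on its update with precision lambda, i.e. variance 1/lambda, exactly as described above.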

cpp/Testable.h Normal file

@@ -0,0 +1,50 @@
/**
* @file Testable
* @brief Abstract base class for values that can be used in unit tests
* @author Frank Dellaert
*/
// \callgraph
#pragma once
namespace gtsam {
/**
* The Testable class should be templated with the derived class, e.g.
* class Rot3 : public Testable<Rot3>. This allows us to define the
* input type of equals as a Rot3 as well.
*/
template <class Derived>
class Testable {
public:
/**
* print
* @param s optional string naming the object
*/
virtual void print(const std::string& name) const = 0;
/**
* equality up to tolerance
* tricky to implement, see NonLinearFactor1 for an example
*/
virtual bool equals(const Derived& expected, double tol) const = 0;
}; // Testable class
/**
* This template works for any type with equals
*/
template<class V>
bool assert_equal(const V& actual, const V& expected, double tol = 1e-9) {
if (actual.equals(expected, tol))
return true;
printf("Not equal:\n");
actual.print("actual");
expected.print("expected");
return false;
}
} // gtsam
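A usage sketch of the CRTP pattern above, with a hypothetical Point2 type (not part of this commit), showing how equals gets the derived type as its argument:

    #include <cmath>
    #include <cstdio>
    #include <string>
    #include "Testable.h"

    // hypothetical example type implementing the Testable concept
    class Point2 : public gtsam::Testable<Point2> {
      double x_, y_;
    public:
      Point2(double x, double y) : x_(x), y_(y) {}
      virtual void print(const std::string& name) const {
        printf("%s: (%g, %g)\n", name.c_str(), x_, y_);
      }
      // takes a Point2, not a base class: that is what the CRTP buys us
      virtual bool equals(const Point2& expected, double tol) const {
        return std::fabs(x_ - expected.x_) < tol && std::fabs(y_ - expected.y_) < tol;
      }
    };

    // assert_equal then works for any such type:
    //   assert_equal(Point2(1,2), Point2(1,2)); // true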

cpp/Value.h

@@ -9,15 +9,16 @@
 #pragma once

 #include "Vector.h"
+#include "Testable.h"

 namespace gtsam {

   /**
    * The value class should be templated with the derived class, e.g.
-   * class Rot3 : public Value<rot3>. This allows us to define the
+   * class Rot3 : public Value<Rot3>. This allows us to define the
    * return type of exmap as a Rot3 as well.
    */
-  template <class Derived> class Value {
+  template <class Derived> class Value : public Testable<Derived> {

   private:
@@ -32,12 +33,6 @@ namespace gtsam {
    */
   size_t dim() { return dim_;}

-  /**
-   * print
-   * @param s optional string naming the factor
-   */
-  virtual void print(const std::string& s="") const = 0;
-
   /**
    * Exponential map: add a delta vector, addition for most simple
    * types, but fully exponential map for types such as Rot3, which

cpp/smallExample.cpp

@@ -26,39 +26,42 @@ using namespace std;
 namespace gtsam {

 typedef boost::shared_ptr<NonlinearFactor> shared;

 /* ************************************************************************* */
-NonlinearFactorGraph createNonlinearFactorGraph()
-{
-  // Create
-  NonlinearFactorGraph nlfg;
+boost::shared_ptr<const NonlinearFactorGraph> sharedNonlinearFactorGraph() {
+  // Create
+  boost::shared_ptr<NonlinearFactorGraph> nlfg(new NonlinearFactorGraph);

   // prior on x1
   double sigma1=0.1;
   Vector mu(2); mu(0) = 0 ; mu(1) = 0;
   shared f1(new Point2Prior(mu, sigma1, "x1"));
-  nlfg.push_back(f1);
+  nlfg->push_back(f1);

   // odometry between x1 and x2
   double sigma2=0.1;
   Vector z2(2); z2(0) = 1.5 ; z2(1) = 0;
   shared f2(new Simulated2DOdometry(z2, sigma2, "x1", "x2"));
-  nlfg.push_back(f2);
+  nlfg->push_back(f2);

   // measurement between x1 and l1
   double sigma3=0.2;
   Vector z3(2); z3(0) = 0. ; z3(1) = -1.;
   shared f3(new Simulated2DMeasurement(z3, sigma3, "x1", "l1"));
-  nlfg.push_back(f3);
+  nlfg->push_back(f3);

   // measurement between x2 and l1
   double sigma4=0.2;
   Vector z4(2); z4(0)= -1.5 ; z4(1) = -1.;
   shared f4(new Simulated2DMeasurement(z4, sigma4, "x2", "l1"));
-  nlfg.push_back(f4);
+  nlfg->push_back(f4);

   return nlfg;
 }
+
+NonlinearFactorGraph createNonlinearFactorGraph() {
+  return *sharedNonlinearFactorGraph();
+}

 /* ************************************************************************* */
@@ -119,18 +122,22 @@ FGConfig createConfig()
 }

 /* ************************************************************************* */
-FGConfig createNoisyConfig()
+boost::shared_ptr<const FGConfig> sharedNoisyConfig()
 {
   Vector v_x1(2); v_x1(0) = 0.1; v_x1(1) = 0.1;
   Vector v_x2(2); v_x2(0) = 1.4; v_x2(1) = 0.2;
   Vector v_l1(2); v_l1(0) = 0.1; v_l1(1) = -1.1;
-  FGConfig c;
-  c.insert("x1", v_x1);
-  c.insert("x2", v_x2);
-  c.insert("l1", v_l1);
+  boost::shared_ptr<FGConfig> c(new FGConfig);
+  c->insert("x1", v_x1);
+  c->insert("x2", v_x2);
+  c->insert("l1", v_l1);
   return c;
 }
+
+FGConfig createNoisyConfig() {
+  return *sharedNoisyConfig();
+}

 /* ************************************************************************* */
 FGConfig createConstrainedConfig()
 {
@@ -320,17 +327,21 @@ namespace optimize {
 }

 /* ************************************************************************* */
-NonlinearFactorGraph createReallyNonlinearFactorGraph()
+boost::shared_ptr<const NonlinearFactorGraph> sharedReallyNonlinearFactorGraph()
 {
-  NonlinearFactorGraph fg;
+  boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
   Vector z = Vector_(2,1.0,0.0);
   double sigma = 0.1;
   boost::shared_ptr<NonlinearFactor1>
     factor(new NonlinearFactor1(z,sigma,&optimize::h,"x",&optimize::H));
-  fg.push_back(factor);
+  fg->push_back(factor);
   return fg;
 }
+
+NonlinearFactorGraph createReallyNonlinearFactorGraph() {
+  return *sharedReallyNonlinearFactorGraph();
+}

 /* ************************************************************************* */
 } // namespace gtsam

cpp/smallExample.h

@@ -5,12 +5,12 @@
  * @author Carlos Nieto
  */

 // \callgraph

 #pragma once

-
+#include <boost/shared_ptr.hpp>
 #include "ConstrainedNonlinearFactorGraph.h"
 #include "ConstrainedChordalBayesNet.h"
 #include "ConstrainedLinearFactorGraph.h"
@@ -21,76 +21,79 @@
 namespace gtsam {

 /**
  * Create small example for non-linear factor graph
  */
-NonlinearFactorGraph createNonlinearFactorGraph() ;
+boost::shared_ptr<const NonlinearFactorGraph> sharedNonlinearFactorGraph();
+NonlinearFactorGraph createNonlinearFactorGraph();

 /**
  * Create small example constrained factor graph
  */
 ConstrainedLinearFactorGraph createConstrainedLinearFactorGraph();

 /**
  * Create small example constrained nonlinear factor graph
  */
 ConstrainedNonlinearFactorGraph createConstrainedNonlinearFactorGraph();

 /**
  * Create configuration to go with it
  * The ground truth configuration for the example above
  */
 FGConfig createConfig();

 /**
  * Create configuration for constrained example
  * This is the ground truth version
  */
 FGConfig createConstrainedConfig();

 /**
  * create a noisy configuration for a nonlinear factor graph
  */
-FGConfig createNoisyConfig();
+boost::shared_ptr<const FGConfig> sharedNoisyConfig();
+FGConfig createNoisyConfig();

 /**
  * Zero delta config
  */
 FGConfig createZeroDelta();

 /**
  * Delta config that, when added to noisyConfig, returns the ground truth
  */
 FGConfig createCorrectDelta();

 /**
  * create a linear factor graph
  * The non-linear graph above evaluated at NoisyConfig
  */
 LinearFactorGraph createLinearFactorGraph();

 /**
  * create small Chordal Bayes Net x <- y
  */
 ChordalBayesNet createSmallChordalBayesNet();

 /**
  * create small Constrained Chordal Bayes Net (from other constrained example)
  */
 ConstrainedChordalBayesNet createConstrainedChordalBayesNet();

 /**
  * Create really non-linear factor graph (cos/sin)
  */
-NonlinearFactorGraph createReallyNonlinearFactorGraph();
+boost::shared_ptr<const NonlinearFactorGraph> sharedReallyNonlinearFactorGraph();
+NonlinearFactorGraph createReallyNonlinearFactorGraph();

 /**
  * Create a noisy configuration for linearization
  */
 FGConfig createConstrainedLinConfig();

 /**
  * Create the correct delta configuration
  */
 FGConfig createConstrainedCorrectDelta();
 }

cpp/testFGConfig.cpp

@@ -60,12 +60,8 @@ TEST( FGConfig, plus)
   expected.insert("x", wx).insert("y",wy);

   // functional
-  FGConfig actual = fg + delta;
+  FGConfig actual = fg.exmap(delta);
   CHECK(actual.equals(expected));
-
-  // inplace
-  fg += delta;
-  CHECK(fg.equals(expected));
 }

 /* ************************************************************************* */

cpp/testNonlinearFactorGraph.cpp

@@ -6,7 +6,6 @@
  * @author Christian Potthast
  */

-
 /*STL/C++*/
 #include <iostream>
 using namespace std;
@@ -19,152 +18,53 @@ using namespace std;
 using namespace gtsam;

 /* ************************************************************************* */
-TEST( NonlinearFactorGraph, equals ){
+TEST( NonlinearFactorGraph, equals )
+{
   NonlinearFactorGraph fg = createNonlinearFactorGraph();
   NonlinearFactorGraph fg2 = createNonlinearFactorGraph();
   CHECK( fg.equals(fg2) );
 }

 /* ************************************************************************* */
 TEST( NonlinearFactorGraph, error )
 {
   NonlinearFactorGraph fg = createNonlinearFactorGraph();
   FGConfig c1 = createConfig();
   double actual1 = fg.error(c1);
   DOUBLES_EQUAL( 0.0, actual1, 1e-9 );
   FGConfig c2 = createNoisyConfig();
   double actual2 = fg.error(c2);
   DOUBLES_EQUAL( 5.625, actual2, 1e-9 );
 }

 /* ************************************************************************* */
 TEST( NonlinearFactorGraph, probPrime )
 {
   NonlinearFactorGraph fg = createNonlinearFactorGraph();
   FGConfig cfg = createConfig();

   // evaluate the probability of the factor graph
   double actual = fg.probPrime(cfg);
   double expected = 1.0;
   DOUBLES_EQUAL(expected,actual,0);
 }

 /* ************************************************************************* */
 TEST( NonlinearFactorGraph, linearize )
 {
   NonlinearFactorGraph fg = createNonlinearFactorGraph();
   FGConfig initial = createNoisyConfig();
   LinearFactorGraph linearized = fg.linearize(initial);
   LinearFactorGraph expected = createLinearFactorGraph();
   CHECK(expected.equals(linearized));
 }

 /* ************************************************************************* */
-TEST( NonlinearFactorGraph, iterate )
-{
-  NonlinearFactorGraph fg = createNonlinearFactorGraph();
-  FGConfig initial = createNoisyConfig();
-
-  // Expected configuration is the difference between the noisy config
-  // and the ground-truth config. One step only because it's linear !
-  FGConfig expected;
-  Vector dl1(2); dl1(0)=-0.1; dl1(1)= 0.1; expected.insert("l1",dl1);
-  Vector dx1(2); dx1(0)=-0.1; dx1(1)=-0.1; expected.insert("x1",dx1);
-  Vector dx2(2); dx2(0)= 0.1; dx2(1)=-0.2; expected.insert("x2",dx2);
-
-  // Check one ordering
-  Ordering ord1;
-  ord1.push_back("x2");
-  ord1.push_back("l1");
-  ord1.push_back("x1");
-  FGConfig actual1 = fg.iterate(initial, ord1);
-  CHECK(actual1.equals(expected));
-
-  // Check another
-  Ordering ord2;
-  ord2.push_back("x1");
-  ord2.push_back("x2");
-  ord2.push_back("l1");
-  FGConfig actual2 = fg.iterate(initial, ord2);
-  CHECK(actual2.equals(expected));
-
-  // And yet another...
-  Ordering ord3;
-  ord3.push_back("l1");
-  ord3.push_back("x1");
-  ord3.push_back("x2");
-  FGConfig actual3 = fg.iterate(initial, ord3);
-  CHECK(actual3.equals(expected));
-}
-
-/* ************************************************************************* */
-TEST( NonlinearFactorGraph, optimize ) {
-
-  NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
-
-  // test error at minimum
-  Vector xstar = Vector_(1,0.0);
-  FGConfig cstar;
-  cstar.insert("x",xstar);
-  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
-
-  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
-  Vector x0 = Vector_(1,3.0);
-  FGConfig c0;
-  c0.insert("x",x0);
-  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);
-
-  // optimize parameters
-  Ordering ord;
-  ord.push_back("x");
-  double relativeErrorTreshold = 1e-5;
-  double absoluteErrorTreshold = 1e-5;
-
-  // Gauss-Newton
-  FGConfig actual = c0;
-  fg.optimize(actual,ord,relativeErrorTreshold,absoluteErrorTreshold);
-  CHECK(actual.equals(cstar));
-
-  // Levenberg-Marquardt
-  FGConfig actual2 = c0;
-  double lambda0 = 1000, lambdaFactor = 10;
-  fg.optimizeLM(actual2,ord,relativeErrorTreshold,absoluteErrorTreshold,0,lambda0,lambdaFactor);
-  CHECK(actual2.equals(cstar));
-}
-
-/* ************************************************************************* */
-TEST( NonlinearFactorGraph, iterateLM ) {
-
-  // really non-linear factor graph
-  NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
-
-  // config far from minimum
-  Vector x0 = Vector_(1,3.0);
-  FGConfig config;
-  config.insert("x",x0);
-
-  // ordering
-  Ordering ord;
-  ord.push_back("x");
-
-  // normal iterate
-  int verbosity = 0;
-  FGConfig actual1 = config;
-  fg.iterate(actual1,ord,verbosity);
-
-  // LM iterate with lambda 0 should be the same
-  FGConfig actual2 = config;
-  double lambda0 = 0, lambdaFactor = 10;
-  double currentError = fg.error(actual2);
-  fg.iterateLM(actual2, currentError, lambda0, lambdaFactor, ord, verbosity);
-
-  CHECK(actual1.equals(actual2));
-}
-
-/* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
 /* ************************************************************************* */

cpp/testNonlinearOptimizer.cpp Normal file

@@ -0,0 +1,139 @@
/**
* @file testNonlinearOptimizer.cpp
* @brief Unit tests for NonlinearOptimizer class
* @author Frank Dellaert
*/
#include <iostream>
using namespace std;
#include <CppUnitLite/TestHarness.h>
#include "Matrix.h"
#include "smallExample.h"
#include "NonlinearOptimizer.cpp"
using namespace gtsam;
typedef NonlinearOptimizer<NonlinearFactorGraph,FGConfig> Optimizer;
/* ************************************************************************* */
TEST( NonlinearFactorGraph, delta )
{
NonlinearFactorGraph fg = createNonlinearFactorGraph();
Optimizer::shared_config initial = sharedNoisyConfig();
// Expected configuration is the difference between the noisy config
// and the ground-truth config. One step only because it's linear !
FGConfig expected;
Vector dl1(2);
dl1(0) = -0.1;
dl1(1) = 0.1;
expected.insert("l1", dl1);
Vector dx1(2);
dx1(0) = -0.1;
dx1(1) = -0.1;
expected.insert("x1", dx1);
Vector dx2(2);
dx2(0) = 0.1;
dx2(1) = -0.2;
expected.insert("x2", dx2);
// Check one ordering
Ordering ord1;
ord1.push_back("x2");
ord1.push_back("l1");
ord1.push_back("x1");
Optimizer optimizer1(fg, ord1, initial);
FGConfig actual1 = optimizer1.delta();
CHECK(assert_equal(actual1,expected));
// Check another
Ordering ord2;
ord2.push_back("x1");
ord2.push_back("x2");
ord2.push_back("l1");
Optimizer optimizer2(fg, ord2, initial);
FGConfig actual2 = optimizer2.delta();
CHECK(assert_equal(actual2,expected));
// And yet another...
Ordering ord3;
ord3.push_back("l1");
ord3.push_back("x1");
ord3.push_back("x2");
Optimizer optimizer3(fg, ord3, initial);
FGConfig actual3 = optimizer3.delta();
CHECK(assert_equal(actual3,expected));
}
/* ************************************************************************* */
TEST( NonlinearFactorGraph, iterateLM )
{
// really non-linear factor graph
NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
// config far from minimum
Vector x0 = Vector_(1, 3.0);
boost::shared_ptr<FGConfig> config(new FGConfig);
config->insert("x", x0);
// ordering
Ordering ord;
ord.push_back("x");
// create initial optimization state, with lambda=0
Optimizer optimizer(fg, ord, config, 0);
// normal iterate
Optimizer iterated1 = optimizer.iterate();
// LM iterate with lambda 0 should be the same
Optimizer iterated2 = optimizer.iterateLM();
CHECK(assert_equal(iterated1.config(), iterated2.config(), 1e-9));
}
/* ************************************************************************* */
TEST( NonlinearFactorGraph, optimize )
{
NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
// test error at minimum
Vector xstar = Vector_(1, 0.0);
FGConfig cstar;
cstar.insert("x", xstar);
DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
// test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
Vector x0 = Vector_(1, 3.0);
boost::shared_ptr<FGConfig> c0(new FGConfig);
c0->insert("x", x0);
DOUBLES_EQUAL(199.0,fg.error(*c0),1e-3);
// optimize parameters
Ordering ord;
ord.push_back("x");
double relativeThreshold = 1e-5;
double absoluteThreshold = 1e-5;
// initial optimization state is the same in both cases tested
Optimizer optimizer(fg, ord, c0);
// Gauss-Newton
Optimizer actual1 = optimizer.gaussNewton(relativeThreshold,
absoluteThreshold);
CHECK(assert_equal(actual1.config(),cstar));
// Levenberg-Marquardt
Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
absoluteThreshold, Optimizer::SILENT);
CHECK(assert_equal(actual2.config(),cstar));
}
/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */