diff --git a/.cproject b/.cproject
index 3283b0ffc..10af82bcb 100644
--- a/.cproject
+++ b/.cproject
@@ -366,6 +366,21 @@
 true
 true
+
+make
+testNonlinearOptimizer.run
+true
+true
+true
+
+
+make
+
+testLinearFactor.run
+true
+true
+true
+
 make
 install
diff --git a/cpp/FGConfig.cpp b/cpp/FGConfig.cpp
index d869fab18..fd34bad35 100644
--- a/cpp/FGConfig.cpp
+++ b/cpp/FGConfig.cpp
@@ -9,13 +9,11 @@
 #include
 #include
 #include "FGConfig.h"
-#include "Value.h"
-
-using namespace std;
 
 // trick from some reading group
 #define FOREACH_PAIR( KEY, VAL, COL) BOOST_FOREACH (boost::tie(KEY,VAL),COL)
 
+using namespace std;
 using namespace gtsam;
 
 /* ************************************************************************* */
@@ -29,23 +27,17 @@ void check_size(const string& key, const Vector & vj, const Vector & dj) {
 }
 
 /* ************************************************************************* */
-void FGConfig::operator+=(const FGConfig & delta)
+FGConfig FGConfig::exmap(const FGConfig & delta) const
 {
-  for (iterator it = values.begin(); it!=values.end(); it++) {
+  FGConfig newConfig;
+  for (const_iterator it = values.begin(); it!=values.end(); it++) {
     string j = it->first;
-    Vector &vj = it->second;
+    const Vector &vj = it->second;
     const Vector& dj = delta[j];
     check_size(j,vj,dj);
-    vj += dj;
+    newConfig.insert(j, vj + dj);
   }
-}
-
-/* ************************************************************************* */
-FGConfig FGConfig::operator+(const FGConfig & delta) const
-{
-  FGConfig result = *this;
-  result += delta;
-  return result;
+  return newConfig;
 }
 
 /* ************************************************************************* */
diff --git a/cpp/FGConfig.h b/cpp/FGConfig.h
index b6217108a..12e98ff37 100644
--- a/cpp/FGConfig.h
+++ b/cpp/FGConfig.h
@@ -12,12 +12,13 @@
 #include
 #include
-#include "Value.h"
+#include "Testable.h"
+#include "Vector.h"
 
 namespace gtsam {
 
   /** Factor Graph Configuration */
-  class FGConfig {
+  class FGConfig : public Testable<FGConfig> {
 
   protected:
 
    /** Map from string indices to values */
@@ -27,10 +28,10 @@ namespace gtsam {
    typedef std::map<std::string, Vector>::iterator iterator;
    typedef std::map<std::string, Vector>::const_iterator const_iterator;
 
-    FGConfig() {};
-    FGConfig(const FGConfig& cfg_in) : values(cfg_in.values){};
+    FGConfig():Testable<FGConfig>() {}
+    FGConfig(const FGConfig& cfg_in) : Testable<FGConfig>(), values(cfg_in.values) {}
 
-    virtual ~FGConfig() {};
+    virtual ~FGConfig() {}
 
    /** return all the nodes in the graph **/
    std::vector<std::string> get_names() const {
@@ -47,10 +48,10 @@ namespace gtsam {
    }
 
    /**
-    * add a delta config, should apply exponential map more generally
+    * Add a delta config, needed for use in NonlinearOptimizer.
+    * For FGConfig, this is just addition.
    */
-    virtual void operator+=(const FGConfig & delta);
-    virtual FGConfig operator+(const FGConfig & delta) const;
+    FGConfig exmap(const FGConfig & delta) const;
 
    const_iterator begin() const {return values.begin();}
    const_iterator end()   const {return values.end();}
@@ -77,7 +78,7 @@ namespace gtsam {
    void print(const std::string& name = "") const;
 
    /** equals, for unit testing */
-    bool equals(const FGConfig& expected, double tol=1e-6) const;
+    bool equals(const FGConfig& expected, double tol=1e-9) const;
 
    void clear() {values.clear();}
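The `operator+=`/`operator+` pair is replaced by the single functional `exmap`, which leaves the receiver untouched and returns a fresh configuration. A minimal usage sketch against the API introduced by this patch (the helper function is hypothetical):

#include "FGConfig.h"
using namespace gtsam;

// Hypothetical helper: apply a solved delta without mutating the input.
FGConfig applyDelta(const FGConfig& config, const FGConfig& delta) {
  // exmap is const, so 'config' survives unchanged; older iterates can be
  // kept alive (e.g. behind shared_ptr) while the optimizer recurses.
  return config.exmap(delta);
}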
diff --git a/cpp/Makefile.am b/cpp/Makefile.am
index 0760cb9ce..cbbc8604a 100644
--- a/cpp/Makefile.am
+++ b/cpp/Makefile.am
@@ -80,14 +80,18 @@ timeLinearFactor: CXXFLAGS += -I /opt/local/include
 timeLinearFactor: LDFLAGS += -L.libs -lgtsam
 
 # graphs
-sources += ConstrainedChordalBayesNet.cpp ChordalBayesNet.cpp FactorGraph.cpp LinearFactorGraph.cpp NonlinearFactorGraph.cpp
+sources += ConstrainedChordalBayesNet.cpp ChordalBayesNet.cpp FactorGraph.cpp
+sources += LinearFactorGraph.cpp NonlinearFactorGraph.cpp
 sources += ConstrainedNonlinearFactorGraph.cpp ConstrainedLinearFactorGraph.cpp
-check_PROGRAMS += testChordalBayesNet testConstrainedChordalBayesNet testFactorgraph testLinearFactorGraph testNonlinearFactorGraph testConstrainedNonlinearFactorGraph testConstrainedLinearFactorGraph
+check_PROGRAMS += testChordalBayesNet testConstrainedChordalBayesNet testFactorgraph
+check_PROGRAMS += testLinearFactorGraph testNonlinearFactorGraph testNonlinearOptimizer
+check_PROGRAMS += testConstrainedNonlinearFactorGraph testConstrainedLinearFactorGraph
 testChordalBayesNet_SOURCES = $(example) testChordalBayesNet.cpp
 testConstrainedChordalBayesNet_SOURCES = $(example) testConstrainedChordalBayesNet.cpp
 testFactorgraph_SOURCES = testFactorgraph.cpp
 testLinearFactorGraph_SOURCES = $(example) testLinearFactorGraph.cpp
 testNonlinearFactorGraph_SOURCES = $(example) testNonlinearFactorGraph.cpp
+testNonlinearOptimizer_SOURCES = $(example) testNonlinearOptimizer.cpp
 testConstrainedNonlinearFactorGraph_SOURCES = $(example) testConstrainedNonlinearFactorGraph.cpp
 testConstrainedLinearFactorGraph_SOURCES = $(example) testConstrainedLinearFactorGraph.cpp
@@ -95,6 +99,7 @@
 testChordalBayesNet_LDADD = libgtsam.la
 testFactorgraph_LDADD = libgtsam.la
 testLinearFactorGraph_LDADD = libgtsam.la
 testNonlinearFactorGraph_LDADD = libgtsam.la
+testNonlinearOptimizer_LDADD = libgtsam.la
 testConstrainedNonlinearFactorGraph_LDADD = libgtsam.la
 testConstrainedLinearFactorGraph_LDADD = libgtsam.la
 testConstrainedChordalBayesNet_LDADD = libgtsam.la
@@ -140,7 +145,10 @@ testVSLAMFactor_LDADD = libgtsam.la
 
 # The header files will be installed in ~/include/gtsam
-headers = gtsam.h Value.h Factor.h LinearFactorSet.h Point2Prior.h Simulated2DOdometry.h Simulated2DMeasurement.h smallExample.h $(sources:.cpp=.h)
+headers = gtsam.h Value.h Testable.h Factor.h LinearFactorSet.h Point2Prior.h
+headers += Simulated2DOdometry.h Simulated2DMeasurement.h smallExample.h
+headers += NonlinearOptimizer.h NonlinearOptimizer.cpp # template !
+headers += $(sources:.cpp=.h)
 
 # create both dynamic and static libraries
 AM_CXXFLAGS = -I$(boost) -fPIC
diff --git a/cpp/NonlinearFactorGraph.cpp b/cpp/NonlinearFactorGraph.cpp
index 2a9ff459e..6dbd002f8 100644
--- a/cpp/NonlinearFactorGraph.cpp
+++ b/cpp/NonlinearFactorGraph.cpp
@@ -16,240 +16,74 @@ using namespace std;
 
 namespace gtsam {
 
-/* ************************************************************************* */
-LinearFactorGraph NonlinearFactorGraph::linearize(const FGConfig& config) const
-{
-  // TODO speed up the function either by returning a pointer or by
-  // returning the linearisation as a second argument and returning
-  // the reference
-
-  // create an empty linear FG
-  LinearFactorGraph linearFG;
-
-  // linearize all factors
-  for(const_iterator factor=factors_.begin(); factor<factors_.end(); factor++) {
-    LinearFactor::shared_ptr lf = (*factor)->linearize(config);
-    linearFG.push_back(lf);
-  }
-
-  return linearFG;
-}
-
-/* ************************************************************************* */
-double calculate_error (const NonlinearFactorGraph& fg, const FGConfig& config, int verbosity) {
-  double newError = fg.error(config);
-  if (verbosity>=1) cout << "error: " << newError << endl;
-  return newError;
-}
-
-/* ************************************************************************* */
-bool check_convergence (double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    double currentError, double newError,
-    int verbosity)
-{
-  // check if diverges
-  double absoluteDecrease = currentError - newError;
-  if (verbosity>=2) cout << "absoluteDecrease: " << absoluteDecrease << endl;
-  if (absoluteDecrease<0)
-    throw overflow_error("NonlinearFactorGraph::optimize: error increased, diverges.");
-
-  // calculate relative error decrease and update currentError
-  double relativeDecrease = absoluteDecrease/currentError;
-  if (verbosity>=2) cout << "relativeDecrease: " << relativeDecrease << endl;
-  bool converged =
-    (relativeDecrease < relativeErrorTreshold) ||
-    (absoluteDecrease < absoluteErrorTreshold);
-  if (verbosity>=1 && converged) cout << "converged" << endl;
-  return converged;
-}
-
-/* ************************************************************************* */
-// linearize and solve, return delta config (TODO: return updated)
-/* ************************************************************************* */
-FGConfig NonlinearFactorGraph::iterate
-(const FGConfig& config, const Ordering& ordering) const
-{
-  LinearFactorGraph linear = linearize(config); // linearize all factors_
-  return linear.optimize(ordering);
-}
-
-/* ************************************************************************* */
-// One iteration of optimize, imperative :-(
-/* ************************************************************************* */
-double NonlinearFactorGraph::iterate
-(FGConfig& config, const Ordering& ordering, int verbosity) const
-{
-  FGConfig delta = iterate(config, ordering);   // linearize and solve
-  if (verbosity>=4) delta.print("delta");       // maybe show output
-  config += delta;                              // update config
-  if (verbosity>=3) config.print("config");     // maybe show output
-  return calculate_error(*this,config,verbosity);
-}
-
-/* ************************************************************************* */
-Ordering NonlinearFactorGraph::getOrdering(FGConfig& config) const
-{
-  // TODO: FD: Whoa! This is crazy !!!!! re-linearizing just to get ordering ?
-  LinearFactorGraph lfg = linearize(config);
-  return lfg.getOrdering();
-}
-/* ************************************************************************* */
-void NonlinearFactorGraph::optimize(FGConfig& config,
-    const Ordering& ordering,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity) const
-{
-  bool converged = false;
-  double currentError = calculate_error(*this,config, verbosity);
-
-  // while not converged
-  while(!converged) {
-    double newError = iterate(config, ordering, verbosity); // linearize, solve, update
-    converged = gtsam::check_convergence(relativeErrorTreshold, absoluteErrorTreshold,
-        currentError, newError, verbosity);
-    currentError = newError;
-  }
-}
-
-/* ************************************************************************* */
-bool NonlinearFactorGraph::check_convergence(const FGConfig& config1,
-    const FGConfig& config2,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity) const
-{
-  double currentError = calculate_error(*this, config1, verbosity);
-  double newError = calculate_error(*this, config2, verbosity);
-  return gtsam::check_convergence(relativeErrorTreshold, absoluteErrorTreshold,
-      currentError, newError, verbosity);
-
-}
-
-
-/* ************************************************************************* */
-// One attempt at a tempered Gauss-Newton step with given lambda
-/* ************************************************************************* */
-pair<FGConfig,double> NonlinearFactorGraph::try_lambda
-(const FGConfig& config, const LinearFactorGraph& linear,
- double lambda, const Ordering& ordering, int verbosity) const
-{
-  if (verbosity>=1)
-    cout << "trying lambda = " << lambda << endl;
-  LinearFactorGraph damped =
-    linear.add_priors(sqrt(lambda));            // add prior-factors
-  if (verbosity>=7) damped.print("damped");
-  FGConfig delta = damped.optimize(ordering);   // solve
-  if (verbosity>=5) delta.print("delta");
-  FGConfig newConfig = config + delta;          // update config
-  if (verbosity>=5) newConfig.print("config");
-  double newError =                             // calculate...
-    calculate_error(*this,newConfig,verbosity>=4); // ...new error
-  return make_pair(newConfig,newError);         // return config and error
-}
-
-/* ************************************************************************* */
-// One iteration of Levenberg Marquardt, imperative :-(
-/* ************************************************************************* */
-double NonlinearFactorGraph::iterateLM
-(FGConfig& config, double currentError, double& lambda, double lambdaFactor,
- const Ordering& ordering, int verbosity) const
-{
-  FGConfig newConfig;
-  double newError;
-  LinearFactorGraph linear = linearize(config); // linearize all factors once
-  if (verbosity>=6) linear.print("linear");
-
-  // try lambda steps with successively larger lambda until we achieve descent
-  boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  while (newError > currentError) {
-    lambda = lambda * lambdaFactor; // be more cautious
-    boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  }
-
-  // next time, let's be more adventerous
-  lambda = lambda / lambdaFactor;
-
-  // return result of this iteration
-  config = newConfig;
-  if (verbosity>=4) config.print("config");     // maybe show output
-  if (verbosity>=1) cout << "error: " << newError << endl;
-  return newError;
-}
-
-/* ************************************************************************* */
-void NonlinearFactorGraph::optimizeLM(FGConfig& config,
-    const Ordering& ordering,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity,
-    double lambda0,
-    double lambdaFactor) const
-{
-  double lambda = lambda0;
-  bool converged = false;
-  double currentError = calculate_error(*this,config, verbosity);
-
-  if (verbosity>=4) config.print("config");     // maybe show output
-
-  // while not converged
-  while(!converged) {
-    // linearize, solve, update
-    double newError = iterateLM(config, currentError, lambda, lambdaFactor, ordering, verbosity);
-    converged = gtsam::check_convergence(relativeErrorTreshold, absoluteErrorTreshold,
-        currentError, newError, verbosity);
-    currentError = newError;
-  }
-}
-
-/* ************************************************************************* */
-
-pair<LinearFactorGraph, FGConfig> NonlinearFactorGraph::OneIterationLM( FGConfig& config,
-    const Ordering& ordering,
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    int verbosity,
-    double lambda0,
-    double lambdaFactor) const {
-
-  double lambda = lambda0;
-  bool converged = false;
-  double currentError = calculate_error(*this,config, verbosity);
-
-  if (verbosity>=4) config.print("config");     // maybe show output
-
-  FGConfig newConfig;
-  double newError;
-  LinearFactorGraph linear = linearize(config); // linearize all factors once
-  if (verbosity>=6) linear.print("linear");
-
-  // try lambda steps with successively larger lambda until we achieve descent
-  boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  while (newError > currentError) {
-    lambda = lambda * lambdaFactor; // be more cautious
-    boost::tie(newConfig,newError) = try_lambda(config, linear, lambda, ordering,verbosity);
-  }
-
-  // next time, let's be more adventerous
-  lambda = lambda / lambdaFactor;
-
-  // return result of this iteration
-  config = newConfig;
-  if (verbosity>=4) config.print("config");     // maybe show output
-  if (verbosity>=1) cout << "error: " << newError << endl;
-
-  pair<LinearFactorGraph, FGConfig> p(linear, newConfig);
-  return p;
-
-  // linearize, solve, update
-  //double newError = iterateLM(config, currentError, lambda, lambdaFactor, ordering, verbosity);
-
-}
+  /* ************************************************************************* */
+  LinearFactorGraph NonlinearFactorGraph::linearize(const FGConfig& config) const {
+    // TODO speed up the function either by returning a pointer or by
+    // returning the linearisation as a second argument and returning
+    // the reference
+
+    // create an empty linear FG
+    LinearFactorGraph linearFG;
+
+    // linearize all factors
+    for (const_iterator factor = factors_.begin(); factor < factors_.end(); factor++) {
+      LinearFactor::shared_ptr lf = (*factor)->linearize(config);
+      linearFG.push_back(lf);
+    }
+
+    return linearFG;
+  }
+
+  /* ************************************************************************* */
+  double calculate_error(const NonlinearFactorGraph& fg,
+      const FGConfig& config, int verbosity) {
+    double newError = fg.error(config);
+    if (verbosity >= 1)
+      cout << "error: " << newError << endl;
+    return newError;
+  }
+
+  /* ************************************************************************* */
+  bool check_convergence(double relativeErrorTreshold,
+      double absoluteErrorTreshold, double currentError, double newError,
+      int verbosity) {
+    // check if diverges
+    double absoluteDecrease = currentError - newError;
+    if (verbosity >= 2)
+      cout << "absoluteDecrease: " << absoluteDecrease << endl;
+    if (absoluteDecrease < 0)
+      throw overflow_error(
+          "NonlinearFactorGraph::optimize: error increased, diverges.");
+
+    // calculate relative error decrease and update currentError
+    double relativeDecrease = absoluteDecrease / currentError;
+    if (verbosity >= 2)
+      cout << "relativeDecrease: " << relativeDecrease << endl;
+    bool converged = (relativeDecrease < relativeErrorTreshold)
+        || (absoluteDecrease < absoluteErrorTreshold);
+    if (verbosity >= 1 && converged)
+      cout << "converged" << endl;
+    return converged;
+  }
+
+  /* ************************************************************************* */
+  Ordering NonlinearFactorGraph::getOrdering(const FGConfig& config) const {
+    // TODO: FD: Whoa! This is crazy !!!!! re-linearizing just to get ordering ?
+    LinearFactorGraph lfg = linearize(config);
+    return lfg.getOrdering();
+  }
+
+  /* ************************************************************************* */
+  bool NonlinearFactorGraph::check_convergence(const FGConfig& config1,
+      const FGConfig& config2, double relativeErrorTreshold,
+      double absoluteErrorTreshold, int verbosity) const {
+    double currentError = calculate_error(*this, config1, verbosity);
+    double newError = calculate_error(*this, config2, verbosity);
+    return gtsam::check_convergence(relativeErrorTreshold,
+        absoluteErrorTreshold, currentError, newError, verbosity);
+  }
 
 /* ************************************************************************* */
 
 } // namespace gtsam
diff --git a/cpp/NonlinearFactorGraph.h b/cpp/NonlinearFactorGraph.h
index 0498a4c5a..6283671b3 100644
--- a/cpp/NonlinearFactorGraph.h
+++ b/cpp/NonlinearFactorGraph.h
@@ -21,24 +21,13 @@ namespace gtsam {
 
 typedef FactorGraph<NonlinearFactor> BaseFactorGraph;
 
-/** Factor Graph Constsiting of non-linear factors */
+/** Factor Graph consisting of non-linear factors */
 class NonlinearFactorGraph : public BaseFactorGraph {
 
-public: // internal, exposed for testing only, doc in .cpp file
+public:
+  // internal, exposed for testing only, doc in .cpp file
 
-  FGConfig iterate(const FGConfig& config, const Ordering& ordering) const;
-
-  double iterate(FGConfig& config, const Ordering& ordering, int verbosity) const;
-
-  std::pair<FGConfig,double> try_lambda
-  (const FGConfig& config, const LinearFactorGraph& linear,
-   double lambda, const Ordering& ordering, int verbosity) const;
-
-  double iterateLM(FGConfig& config, double currentError,
-      double& lambda, double lambdaFactor,
-      const Ordering& ordering, int verbosity) const;
-
-  Ordering getOrdering(FGConfig& config) const;
+  Ordering getOrdering(const FGConfig& config) const;
 
 public: // these you will probably want to use
 
   /**
@@ -46,20 +35,6 @@ public: // these you will probably want to use
   */
   LinearFactorGraph linearize(const FGConfig& config) const;
 
-  /**
-   * Optimize a solution for a non linear factor graph
-   * Changes config in place
-   * @param config Reference to current configuration
-   * @param ordering Ordering to optimize with
-   * @param relativeErrorTreshold
-   * @param absoluteErrorTreshold
-   * @param verbosity Integer specifying how much output to provide
-   */
-  void optimize(FGConfig& config,
-      const Ordering& ordering,
-      double relativeErrorTreshold, double absoluteErrorTreshold,
-      int verbosity = 0) const;
-
   /**
    * Given two configs, check whether the error of config2 is
    * different enough from the error for config1, or whether config1
@@ -76,40 +51,6 @@ public: // these you will probably want to use
       double relativeErrorTreshold, double absoluteErrorTreshold,
       int verbosity = 0) const;
 
-  /**
-   * Optimize using Levenberg-Marquardt. Really Levenberg's
-   * algorithm at this moment, as we just add I*\lambda to Hessian
-   * H'H. The probabilistic explanation is very simple: every
-   * variable gets an extra Gaussian prior that biases staying at
-   * current value, with variance 1/lambda. This is done very easily
-   * (but perhaps wastefully) by adding a prior factor for each of
-   * the variables, after linearization.
-   *
-   * Changes config in place :-(
-   *
-   * @param config Reference to current configuration
-   * @param ordering Ordering to optimize with
-   * @param relativeErrorTreshold
-   * @param absoluteErrorTreshold
-   * @param verbosity Integer specifying how much output to provide
-   * @param lambda Reference to current lambda
-   * @param lambdaFactor Factor by which to decrease/increase lambda
-   */
-  void optimizeLM(FGConfig& config,
-      const Ordering& ordering,
-      double relativeErrorTreshold, double absoluteErrorTreshold,
-      int verbosity = 0,
-      double lambda0=1e-5, double lambdaFactor=10) const;
-
-
-
-  std::pair<LinearFactorGraph, FGConfig> OneIterationLM( FGConfig& config, const Ordering& ordering,
-      double relativeErrorTreshold,
-      double absoluteErrorTreshold,
-      int verbosity,
-      double lambda0,
-      double lambdaFactor) const ;
-
 private:
 
   /** Serialization function */
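With iterate, optimize, optimizeLM, and OneIterationLM gone from NonlinearFactorGraph, a single Gauss-Newton step is now composed from the pieces that remain public plus the new FGConfig::exmap. A sketch of what the removed one-argument iterate did, expressed with the surviving API (assuming LinearFactorGraph::optimize and Ordering as used throughout this patch):

#include "NonlinearFactorGraph.h"
#include "LinearFactorGraph.h"
#include "FGConfig.h"
using namespace gtsam;

// One Gauss-Newton step, using only the API that survives this patch.
FGConfig gaussNewtonStep(const NonlinearFactorGraph& fg,
                         const FGConfig& config, const Ordering& ordering) {
  LinearFactorGraph linear = fg.linearize(config); // linearize all factors
  FGConfig delta = linear.optimize(ordering);      // solve the linear system
  return config.exmap(delta);                      // functional update
}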
diff --git a/cpp/NonlinearOptimizer.cpp b/cpp/NonlinearOptimizer.cpp
new file mode 100644
index 000000000..372b130a1
--- /dev/null
+++ b/cpp/NonlinearOptimizer.cpp
@@ -0,0 +1,175 @@
+/**
+ * NonlinearOptimizer.cpp
+ * @brief: Encapsulates nonlinear optimization state
+ * @Author: Frank Dellaert
+ * Created on: Sep 7, 2009
+ */
+
+#include <iostream>
+#include <cmath>
+#include "NonlinearOptimizer.h"
+
+using namespace std;
+
+namespace gtsam {
+
+  /* ************************************************************************* */
+  // Constructors
+  /* ************************************************************************* */
+  template<class G, class C>
+  NonlinearOptimizer<G, C>::NonlinearOptimizer(const G& graph,
+      const Ordering& ordering, shared_config config, double lambda) :
+    graph_(&graph), ordering_(&ordering), config_(config),
+    error_(graph.error(*config)), lambda_(lambda) {
+  }
+
+  /* ************************************************************************* */
+  // linearize and optimize
+  /* ************************************************************************* */
+  template<class G, class C>
+  FGConfig NonlinearOptimizer<G, C>::delta() const {
+    LinearFactorGraph linear = graph_->linearize(*config_);
+    return linear.optimize(*ordering_);
+  }
+
+  /* ************************************************************************* */
+  // One iteration of Gauss Newton
+  /* ************************************************************************* */
+  template<class G, class C>
+  NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::iterate(
+      verbosityLevel verbosity) const {
+
+    // linearize and optimize
+    FGConfig delta = this->delta();
+
+    // maybe show output
+    if (verbosity >= DELTA)
+      delta.print("delta");
+
+    // take old config and update it
+    shared_config newConfig(new C(config_->exmap(delta)));
+
+    // maybe show output
+    if (verbosity >= CONFIG)
+      newConfig->print("newConfig");
+
+    return NonlinearOptimizer(*graph_, *ordering_, newConfig);
+  }
+
+  /* ************************************************************************* */
+  template<class G, class C>
+  NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::gaussNewton(
+      double relativeThreshold, double absoluteThreshold,
+      verbosityLevel verbosity, int maxIterations) const {
+    // linearize, solve, update
+    NonlinearOptimizer next = iterate(verbosity);
+
+    // check convergence
+    bool converged = gtsam::check_convergence(relativeThreshold,
+        absoluteThreshold, error_, next.error_, verbosity);
+
+    // return converged state or iterate, counting down maxIterations
+    if (converged || maxIterations <= 1)
+      return next;
+    else
+      return next.gaussNewton(relativeThreshold, absoluteThreshold,
+          verbosity, maxIterations - 1);
+  }
+
+  /* ************************************************************************* */
+  // Recursively try to do tempered Gauss-Newton steps until we succeed.
+  // Form damped system with given lambda, and return a new, more optimistic
+  // optimizer if error decreased or recurse with a larger lambda if not.
+  // TODO: in theory we can't infinitely recurse, but maybe we should put a max.
+  /* ************************************************************************* */
+  template<class G, class C>
+  NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::try_lambda(
+      const LinearFactorGraph& linear, verbosityLevel verbosity, double factor) const {
+
+    if (verbosity >= TRYLAMBDA)
+      cout << "trying lambda = " << lambda_ << endl;
+
+    // add prior-factors
+    LinearFactorGraph damped = linear.add_priors(sqrt(lambda_));
+    if (verbosity >= DAMPED)
+      damped.print("damped");
+
+    // solve
+    FGConfig delta = damped.optimize(*ordering_);
+    if (verbosity >= TRYDELTA)
+      delta.print("delta");
+
+    // update config
+    shared_config newConfig(new C(config_->exmap(delta)));
+    if (verbosity >= TRYCONFIG)
+      newConfig->print("config");
+
+    // create new optimization state with more adventurous lambda
+    NonlinearOptimizer next(*graph_, *ordering_, newConfig, lambda_ / factor);
+
+    // if error decreased, return the new state
+    if (next.error_ <= error_)
+      return next;
+    else {
+      // TODO: can we avoid copying the config ?
+      NonlinearOptimizer cautious(*graph_, *ordering_, config_, lambda_ * factor);
+      return cautious.try_lambda(linear, verbosity, factor);
+    }
+  }
+
+  /* ************************************************************************* */
+  // One iteration of Levenberg Marquardt
+  /* ************************************************************************* */
+  template<class G, class C>
+  NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::iterateLM(
+      verbosityLevel verbosity, double lambdaFactor) const {
+
+    // maybe show output
+    if (verbosity >= CONFIG)
+      config_->print("config");
+    if (verbosity >= ERROR)
+      cout << "error: " << error_ << endl;
+    if (verbosity >= LAMBDA)
+      cout << "lambda = " << lambda_ << endl;
+
+    // linearize all factors once
+    LinearFactorGraph linear = graph_->linearize(*config_);
+    if (verbosity >= LINEAR)
+      linear.print("linear");
+
+    // try lambda steps with successively larger lambda until we achieve descent
+    return try_lambda(linear, verbosity, lambdaFactor);
+  }
+
+  /* ************************************************************************* */
+  template<class G, class C>
+  NonlinearOptimizer<G, C> NonlinearOptimizer<G, C>::levenbergMarquardt(
+      double relativeThreshold, double absoluteThreshold,
+      verbosityLevel verbosity, int maxIterations, double lambdaFactor) const {
+
+    // do one iteration of LM
+    NonlinearOptimizer next = iterateLM(verbosity, lambdaFactor);
+
+    // check convergence
+    // TODO: move convergence checks here and incorporate in verbosity levels
+    // TODO: build into iterations somehow as an instance variable
+    bool converged = gtsam::check_convergence(relativeThreshold,
+        absoluteThreshold, error_, next.error_, verbosity);
+
+    // return converged state or iterate
+    if (converged || maxIterations <= 1) {
+      // maybe show output
+      if (verbosity >= CONFIG)
+        next.config_->print("final config");
+      if (verbosity >= ERROR)
+        cout << "final error: " << next.error_ << endl;
+      if (verbosity >= LAMBDA)
+        cout << "final lambda = " << next.lambda_ << endl;
+      return next;
+    } else
+      return next.levenbergMarquardt(relativeThreshold, absoluteThreshold,
+          verbosity, maxIterations - 1, lambdaFactor);
+  }
+
+/* ************************************************************************* */
+
+}
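try_lambda implements Levenberg's schedule: on descent the returned state starts the next iteration with lambda_/factor ("more adventurous"), on failure it retries the same step with lambda_*factor ("more cautious"). A toy standalone illustration of that multiplicative schedule; the outcome sequence here is made up:

#include <cstdio>

int main() {
  double lambda = 1e-5;         // default lambda0 from this patch
  const double factor = 10;     // default lambdaFactor from this patch
  // hypothetical descent outcomes for four successive attempts
  bool success[4] = { false, false, true, true };
  for (int i = 0; i < 4; i++) {
    // cautious on failure, adventurous on success
    lambda = success[i] ? lambda / factor : lambda * factor;
    printf("attempt %d: lambda = %g\n", i, lambda);
  }
  return 0;
}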
diff --git a/cpp/NonlinearOptimizer.h b/cpp/NonlinearOptimizer.h
new file mode 100644
index 000000000..31aa33a91
--- /dev/null
+++ b/cpp/NonlinearOptimizer.h
@@ -0,0 +1,147 @@
+/**
+ * NonlinearOptimizer.h
+ * @brief: Encapsulates nonlinear optimization state
+ * @Author: Frank Dellaert
+ * Created on: Sep 7, 2009
+ */
+
+#ifndef NONLINEAROPTIMIZER_H_
+#define NONLINEAROPTIMIZER_H_
+
+#include <boost/shared_ptr.hpp>
+#include "NonlinearFactorGraph.h"
+#include "FGConfig.h"
+
+namespace gtsam {
+
+  /**
+   * The class NonlinearOptimizer encapsulates an optimization state.
+   * Typically it is instantiated with a NonlinearFactorGraph and an initial config
+   * and then one of the optimization routines is called. These recursively iterate
+   * until convergence. All methods are functional and return a new state.
+   *
+   * The class is parameterized by the Config class type, in order to optimize
+   * over non-vector configurations as well.
+   * To use in code, include NonlinearOptimizer.cpp in your cpp file
+   * (the trick in http://www.ddj.com/cpp/184403420 did not work).
+   */
+  template<class FactorGraph, class Config>
+  class NonlinearOptimizer {
+  public:
+
+    // For performance reasons in recursion, we store configs in a shared_ptr
+    typedef boost::shared_ptr<const Config> shared_config;
+
+    enum verbosityLevel {
+      SILENT,
+      ERROR,
+      LAMBDA,
+      CONFIG,
+      DELTA,
+      LINEAR,
+      TRYLAMBDA,
+      TRYCONFIG,
+      TRYDELTA,
+      DAMPED
+    };
+
+  private:
+
+    // keep a reference to const versions of the graph and ordering
+    // These normally do not change
+    const FactorGraph* graph_;
+    const Ordering* ordering_;
+
+    // keep a configuration and its error
+    // These typically change once per iteration (in a functional way)
+    shared_config config_;
+    double error_;
+
+    // keep current lambda for use within LM only
+    // TODO: red flag, should we have an LM class ?
+    double lambda_;
+
+    // Recursively try to do tempered Gauss-Newton steps until we succeed
+    NonlinearOptimizer try_lambda(const LinearFactorGraph& linear,
+        verbosityLevel verbosity, double factor) const;
+
+  public:
+
+    /**
+     * Constructor
+     */
+    NonlinearOptimizer(const FactorGraph& graph, const Ordering& ordering,
+        shared_config config, double lambda = 1e-5);
+
+    /**
+     * Return current config
+     */
+    const Config& config() const {
+      return *config_;
+    }
+
+    /**
+     * Return current error
+     */
+    double error() const {
+      return error_;
+    }
+
+    /**
+     * Return current lambda
+     */
+    double lambda() const {
+      return lambda_;
+    }
+
+    /**
+     * linearize and optimize
+     * This returns an FGConfig, i.e., vectors in tangent space of Config
+     */
+    FGConfig delta() const;
+
+    /**
+     * Do one Gauss-Newton iteration and return next state
+     */
+    NonlinearOptimizer iterate(verbosityLevel verbosity = SILENT) const;
+
+    /**
+     * Optimize a solution for a non linear factor graph
+     * @param relativeThreshold
+     * @param absoluteThreshold
+     * @param verbosity Integer specifying how much output to provide
+     */
+    NonlinearOptimizer
+    gaussNewton(double relativeThreshold, double absoluteThreshold,
+        verbosityLevel verbosity = SILENT, int maxIterations = 100) const;
+
+    /**
+     * One iteration of Levenberg Marquardt
+     */
+    NonlinearOptimizer iterateLM(verbosityLevel verbosity = SILENT,
+        double lambdaFactor = 10) const;
+
+    /**
+     * Optimize using Levenberg-Marquardt. Really Levenberg's
+     * algorithm at this moment, as we just add I*\lambda to Hessian
+     * H'H. The probabilistic explanation is very simple: every
+     * variable gets an extra Gaussian prior that biases staying at
+     * current value, with variance 1/lambda. This is done very easily
+     * (but perhaps wastefully) by adding a prior factor for each of
+     * the variables, after linearization.
+     *
+     * @param relativeThreshold
+     * @param absoluteThreshold
+     * @param verbosity Integer specifying how much output to provide
+     * @param lambdaFactor Factor by which to decrease/increase lambda
+     */
+    NonlinearOptimizer
+    levenbergMarquardt(double relativeThreshold, double absoluteThreshold,
+        verbosityLevel verbosity = SILENT, int maxIterations = 100,
+        double lambdaFactor = 10) const;
+
+  };
+
+}
+
+#endif /* NONLINEAROPTIMIZER_H_ */
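Because NonlinearOptimizer is a class template whose member definitions live in NonlinearOptimizer.cpp, and that .cpp is installed as if it were a header (see the "# template !" line in the Makefile.am change above), client code includes the .cpp to instantiate it. A minimal sketch of the intended usage, mirroring what testNonlinearOptimizer.cpp below does (the thresholds are arbitrary):

#include "smallExample.h"
#include "NonlinearOptimizer.cpp" // template definitions; see Makefile.am note

using namespace gtsam;

// Instantiate for the graph/config pair used throughout this patch.
typedef NonlinearOptimizer<NonlinearFactorGraph, FGConfig> Optimizer;

Optimizer optimizeExample(const NonlinearFactorGraph& fg,
                          const Ordering& ordering,
                          Optimizer::shared_config initial) {
  Optimizer optimizer(fg, ordering, initial);
  // functional: returns a new optimizer state; 'initial' is left untouched
  return optimizer.levenbergMarquardt(1e-5, 1e-5, Optimizer::SILENT);
}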
diff --git a/cpp/Testable.h b/cpp/Testable.h
new file mode 100644
index 000000000..78b6ee011
--- /dev/null
+++ b/cpp/Testable.h
@@ -0,0 +1,50 @@
+/**
+ * @file Testable
+ * @brief Abstract base class for values that can be used in unit tests
+ * @author Frank Dellaert
+ */
+
+// \callgraph
+
+#pragma once
+
+#include <string>
+#include <cstdio>
+
+namespace gtsam {
+
+  /**
+   * The Testable class should be templated with the derived class, e.g.
+   * class Rot3 : public Testable<Rot3>. This allows us to define the
+   * input type of equals as a Rot3 as well.
+   */
+  template<class Derived>
+  class Testable {
+
+  public:
+
+    /**
+     * print
+     * @param name string naming the object
+     */
+    virtual void print(const std::string& name) const = 0;
+
+    /**
+     * equality up to tolerance
+     * tricky to implement, see NonLinearFactor1 for an example
+     */
+    virtual bool equals(const Derived& expected, double tol) const = 0;
+
+  }; // Testable class
+
+  /**
+   * This template works for any type with equals
+   */
+  template<class V>
+  bool assert_equal(const V& actual, const V& expected, double tol = 1e-9) {
+    if (actual.equals(expected, tol))
+      return true;
+    printf("Not equal:\n");
+    actual.print("actual");
+    expected.print("expected");
+    return false;
+  }
+
+} // gtsam
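The CRTP base pins the argument of equals to the derived type, so mismatched comparisons fail at compile time rather than at run time. A toy model of the pattern (Point2Example is hypothetical, not a class in this patch):

#include <cmath>
#include <cstdio>
#include <string>
#include "Testable.h"

// Toy CRTP instance: equals takes a Point2Example, not a base reference.
class Point2Example : public gtsam::Testable<Point2Example> {
  double x_, y_;
public:
  Point2Example(double x, double y) : x_(x), y_(y) {}
  void print(const std::string& name) const {
    printf("%s: (%g, %g)\n", name.c_str(), x_, y_);
  }
  bool equals(const Point2Example& expected, double tol) const {
    return std::fabs(x_ - expected.x_) <= tol
        && std::fabs(y_ - expected.y_) <= tol;
  }
};
// With this in place, assert_equal(a, b) from Testable.h works unchanged.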
diff --git a/cpp/Value.h b/cpp/Value.h
index da91ec20e..a6288755d 100644
--- a/cpp/Value.h
+++ b/cpp/Value.h
@@ -9,15 +9,16 @@
 #pragma once
 
 #include "Vector.h"
+#include "Testable.h"
 
 namespace gtsam {
 
   /**
    * The value class should be templated with the derived class, e.g.
-   * class Rot3 : public Value. This allows us to define the
+   * class Rot3 : public Value<Rot3>. This allows us to define the
    * return type of exmap as a Rot3 as well.
    */
-  template<class Derived> class Value {
+  template<class Derived> class Value : public Testable<Derived> {
 
   private:
 
@@ -32,12 +33,6 @@ namespace gtsam {
     */
    size_t dim() { return dim_;}
 
-    /**
-     * print
-     * @param s optional string naming the factor
-     */
-    virtual void print(const std::string& s="") const = 0;
-
    /**
     * Exponential map: add a delta vector, addition for most simple
     * types, but fully exponential map for types such as Rot3, which
diff --git a/cpp/smallExample.cpp b/cpp/smallExample.cpp
index 1b2399d35..d6af2d69a 100644
--- a/cpp/smallExample.cpp
+++ b/cpp/smallExample.cpp
@@ -26,39 +26,42 @@ using namespace std;
 
 namespace gtsam {
 
-  typedef boost::shared_ptr<NonlinearFactor> shared;
+typedef boost::shared_ptr<NonlinearFactor> shared;
 
 /* ************************************************************************* */
-NonlinearFactorGraph createNonlinearFactorGraph()
-{
-  // Create
-  NonlinearFactorGraph nlfg;
+boost::shared_ptr<NonlinearFactorGraph> sharedNonlinearFactorGraph() {
+  // Create
+  boost::shared_ptr<NonlinearFactorGraph> nlfg(new NonlinearFactorGraph);
 
-  // prior on x1
-  double sigma1=0.1;
-  Vector mu(2); mu(0) = 0 ; mu(1) = 0;
-  shared f1(new Point2Prior(mu, sigma1, "x1"));
-  nlfg.push_back(f1);
+  // prior on x1
+  double sigma1=0.1;
+  Vector mu(2); mu(0) = 0 ; mu(1) = 0;
+  shared f1(new Point2Prior(mu, sigma1, "x1"));
+  nlfg->push_back(f1);
 
-  // odometry between x1 and x2
-  double sigma2=0.1;
-  Vector z2(2); z2(0) = 1.5 ; z2(1) = 0;
-  shared f2(new Simulated2DOdometry(z2, sigma2, "x1", "x2"));
-  nlfg.push_back(f2);
+  // odometry between x1 and x2
+  double sigma2=0.1;
+  Vector z2(2); z2(0) = 1.5 ; z2(1) = 0;
+  shared f2(new Simulated2DOdometry(z2, sigma2, "x1", "x2"));
+  nlfg->push_back(f2);
 
-  // measurement between x1 and l1
-  double sigma3=0.2;
-  Vector z3(2); z3(0) = 0. ; z3(1) = -1.;
-  shared f3(new Simulated2DMeasurement(z3, sigma3, "x1", "l1"));
-  nlfg.push_back(f3);
+  // measurement between x1 and l1
+  double sigma3=0.2;
+  Vector z3(2); z3(0) = 0. ; z3(1) = -1.;
+  shared f3(new Simulated2DMeasurement(z3, sigma3, "x1", "l1"));
+  nlfg->push_back(f3);
 
-  // measurement between x2 and l1
-  double sigma4=0.2;
-  Vector z4(2); z4(0)= -1.5 ; z4(1) = -1.;
-  shared f4(new Simulated2DMeasurement(z4, sigma4, "x2", "l1"));
-  nlfg.push_back(f4);
+  // measurement between x2 and l1
+  double sigma4=0.2;
+  Vector z4(2); z4(0)= -1.5 ; z4(1) = -1.;
+  shared f4(new Simulated2DMeasurement(z4, sigma4, "x2", "l1"));
+  nlfg->push_back(f4);
 
-  return nlfg;
+  return nlfg;
+}
+
+NonlinearFactorGraph createNonlinearFactorGraph() {
+  return *sharedNonlinearFactorGraph();
 }
 
 /* ************************************************************************* */
@@ -119,18 +122,22 @@
 /* ************************************************************************* */
-FGConfig createNoisyConfig()
+boost::shared_ptr<FGConfig> sharedNoisyConfig()
 {
   Vector v_x1(2); v_x1(0) = 0.1; v_x1(1) = 0.1;
   Vector v_x2(2); v_x2(0) = 1.4; v_x2(1) = 0.2;
   Vector v_l1(2); v_l1(0) = 0.1; v_l1(1) = -1.1;
-  FGConfig c;
-  c.insert("x1", v_x1);
-  c.insert("x2", v_x2);
-  c.insert("l1", v_l1);
+  boost::shared_ptr<FGConfig> c(new FGConfig);
+  c->insert("x1", v_x1);
+  c->insert("x2", v_x2);
+  c->insert("l1", v_l1);
   return c;
 }
 
+FGConfig createNoisyConfig() {
+  return *sharedNoisyConfig();
+}
+
 /* ************************************************************************* */
 FGConfig createConstrainedConfig()
 {
@@ -320,17 +327,21 @@ namespace optimize {
 }
 
 /* ************************************************************************* */
-NonlinearFactorGraph createReallyNonlinearFactorGraph()
+boost::shared_ptr<NonlinearFactorGraph> sharedReallyNonlinearFactorGraph()
 {
-  NonlinearFactorGraph fg;
+  boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
   Vector z = Vector_(2,1.0,0.0);
   double sigma = 0.1;
   boost::shared_ptr<NonlinearFactor1> factor(new NonlinearFactor1(z,sigma,&optimize::h,"x",&optimize::H));
-  fg.push_back(factor);
+  fg->push_back(factor);
   return fg;
 }
 
+NonlinearFactorGraph createReallyNonlinearFactorGraph() {
+  return *sharedReallyNonlinearFactorGraph();
+}
+
 /* ************************************************************************* */
 
 } // namespace gtsam
diff --git a/cpp/smallExample.h b/cpp/smallExample.h
index 91034aec8..0a4572a84 100644
--- a/cpp/smallExample.h
+++ b/cpp/smallExample.h
@@ -5,12 +5,12 @@
  * @author Carlos Nieto
  */
 
-
 // \callgraph
 
 #pragma once
 
+#include <boost/shared_ptr.hpp>
 #include "ConstrainedNonlinearFactorGraph.h"
 #include "ConstrainedChordalBayesNet.h"
 #include "ConstrainedLinearFactorGraph.h"
@@ -21,76 +21,79 @@
 namespace gtsam {
 
-/**
- * Create small example for non-linear factor graph
- */
-NonlinearFactorGraph createNonlinearFactorGraph() ;
+  /**
+   * Create small example for non-linear factor graph
+   */
+  boost::shared_ptr<NonlinearFactorGraph> sharedNonlinearFactorGraph();
+  NonlinearFactorGraph createNonlinearFactorGraph();
 
-/**
- * Create small example constrained factor graph
- */
-ConstrainedLinearFactorGraph createConstrainedLinearFactorGraph();
+  /**
+   * Create small example constrained factor graph
+   */
+  ConstrainedLinearFactorGraph createConstrainedLinearFactorGraph();
 
-/**
- * Create small example constrained nonlinear factor graph
- */
-ConstrainedNonlinearFactorGraph createConstrainedNonlinearFactorGraph();
+  /**
+   * Create small example constrained nonlinear factor graph
+   */
+  ConstrainedNonlinearFactorGraph createConstrainedNonlinearFactorGraph();
-/**
- * Create configuration to go with it
- * The ground truth configuration for the example above
- */
-FGConfig createConfig();
+  /**
+   * Create configuration to go with it
+   * The ground truth configuration for the example above
+   */
+  FGConfig createConfig();
 
-/**
- * Create configuration for constrained example
- * This is the ground truth version
- */
-FGConfig createConstrainedConfig();
+  /**
+   * Create configuration for constrained example
+   * This is the ground truth version
+   */
+  FGConfig createConstrainedConfig();
 
-/**
- * create a noisy configuration for a nonlinear factor graph
- */
-FGConfig createNoisyConfig();
+  /**
+   * create a noisy configuration for a nonlinear factor graph
+   */
+  boost::shared_ptr<FGConfig> sharedNoisyConfig();
+  FGConfig createNoisyConfig();
 
-/**
- * Zero delta config
- */
-FGConfig createZeroDelta();
+  /**
+   * Zero delta config
+   */
+  FGConfig createZeroDelta();
 
-/**
- * Delta config that, when added to noisyConfig, returns the ground truth
- */
-FGConfig createCorrectDelta();
+  /**
+   * Delta config that, when added to noisyConfig, returns the ground truth
+   */
+  FGConfig createCorrectDelta();
 
-/**
- * create a linear factor graph
- * The non-linear graph above evaluated at NoisyConfig
- */
-LinearFactorGraph createLinearFactorGraph();
+  /**
+   * create a linear factor graph
+   * The non-linear graph above evaluated at NoisyConfig
+   */
+  LinearFactorGraph createLinearFactorGraph();
 
-/**
- * create small Chordal Bayes Net x <- y
- */
-ChordalBayesNet createSmallChordalBayesNet();
+  /**
+   * create small Chordal Bayes Net x <- y
+   */
+  ChordalBayesNet createSmallChordalBayesNet();
 
-/**
- * create small Constrained Chordal Bayes Net (from other constrained example)
- */
-ConstrainedChordalBayesNet createConstrainedChordalBayesNet();
+  /**
+   * create small Constrained Chordal Bayes Net (from other constrained example)
+   */
+  ConstrainedChordalBayesNet createConstrainedChordalBayesNet();
 
-/**
- * Create really non-linear factor graph (cos/sin)
- */
-NonlinearFactorGraph createReallyNonlinearFactorGraph();
+  /**
+   * Create really non-linear factor graph (cos/sin)
+   */
+  boost::shared_ptr<NonlinearFactorGraph> sharedReallyNonlinearFactorGraph();
+  NonlinearFactorGraph createReallyNonlinearFactorGraph();
 
-/**
- * Create a noisy configuration for linearization
- */
-FGConfig createConstrainedLinConfig();
+  /**
+   * Create a noisy configuration for linearization
+   */
+  FGConfig createConstrainedLinConfig();
 
-/**
- * Create the correct delta configuration
- */
-FGConfig createConstrainedCorrectDelta();
+  /**
+   * Create the correct delta configuration
+   */
+  FGConfig createConstrainedCorrectDelta();
 
 }
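The new shared* factories exist so callers can hold the example graphs and configs behind shared_ptr, which is what NonlinearOptimizer's recursive, functional iterations want; the create* wrappers keep the old by-value interface for existing tests. A short usage sketch:

#include "smallExample.h"
using namespace gtsam;

void factoryExample() {
  // shared_ptr versions: cheap to keep alive across functional iterations
  boost::shared_ptr<NonlinearFactorGraph> fg = sharedNonlinearFactorGraph();
  boost::shared_ptr<FGConfig> c0 = sharedNoisyConfig();

  // by-value versions still work, at the cost of a deep copy
  NonlinearFactorGraph fgCopy = createNonlinearFactorGraph();
  FGConfig c0Copy = createNoisyConfig();
}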
diff --git a/cpp/testFGConfig.cpp b/cpp/testFGConfig.cpp
index a0e96d3cb..fc10a61b7 100644
--- a/cpp/testFGConfig.cpp
+++ b/cpp/testFGConfig.cpp
@@ -60,12 +60,8 @@ TEST( FGConfig, plus)
   expected.insert("x", wx).insert("y",wy);
 
   // functional
-  FGConfig actual = fg + delta;
+  FGConfig actual = fg.exmap(delta);
   CHECK(actual.equals(expected));
-
-  // inplace
-  fg += delta;
-  CHECK(fg.equals(expected));
 }
 
 /* ************************************************************************* */
diff --git a/cpp/testNonlinearFactorGraph.cpp b/cpp/testNonlinearFactorGraph.cpp
index 912cbe9f4..60393b357 100644
--- a/cpp/testNonlinearFactorGraph.cpp
+++ b/cpp/testNonlinearFactorGraph.cpp
@@ -6,7 +6,6 @@
  * @author Christian Potthast
 */
 
-
 /*STL/C++*/
 #include
 using namespace std;
@@ -19,152 +18,53 @@ using namespace std;
 using namespace gtsam;
 
 /* ************************************************************************* */
-TEST( NonlinearFactorGraph, equals ){
+TEST( NonlinearFactorGraph, equals )
+{
-  NonlinearFactorGraph fg = createNonlinearFactorGraph();
-  NonlinearFactorGraph fg2 = createNonlinearFactorGraph();
-  CHECK( fg.equals(fg2) );
+  NonlinearFactorGraph fg = createNonlinearFactorGraph();
+  NonlinearFactorGraph fg2 = createNonlinearFactorGraph();
+  CHECK( fg.equals(fg2) );
 }
 
 /* ************************************************************************* */
 TEST( NonlinearFactorGraph, error )
 {
-  NonlinearFactorGraph fg = createNonlinearFactorGraph();
+  NonlinearFactorGraph fg = createNonlinearFactorGraph();
 
-  FGConfig c1 = createConfig();
-  double actual1 = fg.error(c1);
-  DOUBLES_EQUAL( 0.0, actual1, 1e-9 );
+  FGConfig c1 = createConfig();
+  double actual1 = fg.error(c1);
+  DOUBLES_EQUAL( 0.0, actual1, 1e-9 );
 
-  FGConfig c2 = createNoisyConfig();
-  double actual2 = fg.error(c2);
-  DOUBLES_EQUAL( 5.625, actual2, 1e-9 );
+  FGConfig c2 = createNoisyConfig();
+  double actual2 = fg.error(c2);
+  DOUBLES_EQUAL( 5.625, actual2, 1e-9 );
 }
 
 /* ************************************************************************* */
 TEST( NonlinearFactorGraph, probPrime )
 {
-  NonlinearFactorGraph fg = createNonlinearFactorGraph();
-  FGConfig cfg = createConfig();
+  NonlinearFactorGraph fg = createNonlinearFactorGraph();
+  FGConfig cfg = createConfig();
 
-  // evaluate the probability of the factor graph
-  double actual = fg.probPrime(cfg);
-  double expected = 1.0;
-  DOUBLES_EQUAL(expected,actual,0);
+  // evaluate the probability of the factor graph
+  double actual = fg.probPrime(cfg);
+  double expected = 1.0;
+  DOUBLES_EQUAL(expected,actual,0);
 }
 
 /* ************************************************************************* */
 TEST( NonlinearFactorGraph, linearize )
 {
-  NonlinearFactorGraph fg = createNonlinearFactorGraph();
-  FGConfig initial = createNoisyConfig();
-  LinearFactorGraph linearized = fg.linearize(initial);
-  LinearFactorGraph expected = createLinearFactorGraph();
-  CHECK(expected.equals(linearized));
+  NonlinearFactorGraph fg = createNonlinearFactorGraph();
+  FGConfig initial = createNoisyConfig();
+  LinearFactorGraph linearized = fg.linearize(initial);
+  LinearFactorGraph expected = createLinearFactorGraph();
+  CHECK(expected.equals(linearized));
 }
 
 /* ************************************************************************* */
-TEST( NonlinearFactorGraph, iterate )
-{
-  NonlinearFactorGraph fg = createNonlinearFactorGraph();
-  FGConfig initial = createNoisyConfig();
-
-  // Expected configuration is the difference between the noisy config
-  // and the ground-truth config. One step only because it's linear !
-  FGConfig expected;
-  Vector dl1(2); dl1(0)=-0.1; dl1(1)= 0.1; expected.insert("l1",dl1);
-  Vector dx1(2); dx1(0)=-0.1; dx1(1)=-0.1; expected.insert("x1",dx1);
-  Vector dx2(2); dx2(0)= 0.1; dx2(1)=-0.2; expected.insert("x2",dx2);
-
-  // Check one ordering
-  Ordering ord1;
-  ord1.push_back("x2");
-  ord1.push_back("l1");
-  ord1.push_back("x1");
-  FGConfig actual1 = fg.iterate(initial, ord1);
-  CHECK(actual1.equals(expected));
-
-  // Check another
-  Ordering ord2;
-  ord2.push_back("x1");
-  ord2.push_back("x2");
-  ord2.push_back("l1");
-  FGConfig actual2 = fg.iterate(initial, ord2);
-  CHECK(actual2.equals(expected));
-
-  // And yet another...
-  Ordering ord3;
-  ord3.push_back("l1");
-  ord3.push_back("x1");
-  ord3.push_back("x2");
-  FGConfig actual3 = fg.iterate(initial, ord3);
-  CHECK(actual3.equals(expected));
-
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
 }
-
-/* ************************************************************************* */
-TEST( NonlinearFactorGraph, optimize ) {
-
-  NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
-
-  // test error at minimum
-  Vector xstar = Vector_(1,0.0);
-  FGConfig cstar;
-  cstar.insert("x",xstar);
-  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
-
-  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
-  Vector x0 = Vector_(1,3.0);
-  FGConfig c0;
-  c0.insert("x",x0);
-  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);
-
-  // optimize parameters
-  Ordering ord;
-  ord.push_back("x");
-  double relativeErrorTreshold = 1e-5;
-  double absoluteErrorTreshold = 1e-5;
-
-  // Gauss-Newton
-  FGConfig actual = c0;
-  fg.optimize(actual,ord,relativeErrorTreshold,absoluteErrorTreshold);
-  CHECK(actual.equals(cstar));
-
-  // Levenberg-Marquardt
-  FGConfig actual2 = c0;
-  double lambda0 = 1000, lambdaFactor = 10;
-  fg.optimizeLM(actual2,ord,relativeErrorTreshold,absoluteErrorTreshold,0,lambda0,lambdaFactor);
-  CHECK(actual2.equals(cstar));
-}
-
-/* ************************************************************************* */
-TEST( NonlinearFactorGraph, iterateLM ) {
-
-  // really non-linear factor graph
-  NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
-
-  // config far from minimum
-  Vector x0 = Vector_(1,3.0);
-  FGConfig config;
-  config.insert("x",x0);
-
-  // ordering
-  Ordering ord;
-  ord.push_back("x");
-
-  // normal iterate
-  int verbosity = 0;
-  FGConfig actual1 = config;
-  fg.iterate(actual1,ord,verbosity);
-
-  // LM iterate with lambda 0 should be the same
-  FGConfig actual2 = config;
-  double lambda0 = 0, lambdaFactor = 10;
-  double currentError = fg.error(actual2);
-  fg.iterateLM(actual2, currentError, lambda0, lambdaFactor, ord, verbosity);
-
-  CHECK(actual1.equals(actual2));
-}
-
-/* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
 /* ************************************************************************* */
diff --git a/cpp/testNonlinearOptimizer.cpp b/cpp/testNonlinearOptimizer.cpp
new file mode 100644
index 000000000..dce6ebbea
--- /dev/null
+++ b/cpp/testNonlinearOptimizer.cpp
@@ -0,0 +1,139 @@
+/**
+ * @file testNonlinearOptimizer.cpp
+ * @brief Unit tests for NonlinearOptimizer class
+ * @author Frank Dellaert
+ */
+
+#include
+using namespace std;
+
+#include
+
+#include "Matrix.h"
+#include "smallExample.h"
+#include "NonlinearOptimizer.cpp"
+
+using namespace gtsam;
+
+typedef NonlinearOptimizer<NonlinearFactorGraph, FGConfig> Optimizer;
+
+/* ************************************************************************* */
+TEST( NonlinearFactorGraph, delta )
+{
+  NonlinearFactorGraph fg = createNonlinearFactorGraph();
+  Optimizer::shared_config initial = sharedNoisyConfig();
+
+  // Expected configuration is the difference between the noisy config
+  // and the ground-truth config. One step only because it's linear !
+  FGConfig expected;
+  Vector dl1(2);
+  dl1(0) = -0.1;
+  dl1(1) = 0.1;
+  expected.insert("l1", dl1);
+  Vector dx1(2);
+  dx1(0) = -0.1;
+  dx1(1) = -0.1;
+  expected.insert("x1", dx1);
+  Vector dx2(2);
+  dx2(0) = 0.1;
+  dx2(1) = -0.2;
+  expected.insert("x2", dx2);
+
+  // Check one ordering
+  Ordering ord1;
+  ord1.push_back("x2");
+  ord1.push_back("l1");
+  ord1.push_back("x1");
+  Optimizer optimizer1(fg, ord1, initial);
+  FGConfig actual1 = optimizer1.delta();
+  CHECK(assert_equal(actual1,expected));
+
+  // Check another
+  Ordering ord2;
+  ord2.push_back("x1");
+  ord2.push_back("x2");
+  ord2.push_back("l1");
+  Optimizer optimizer2(fg, ord2, initial);
+  FGConfig actual2 = optimizer2.delta();
+  CHECK(assert_equal(actual2,expected));
+
+  // And yet another...
+  Ordering ord3;
+  ord3.push_back("l1");
+  ord3.push_back("x1");
+  ord3.push_back("x2");
+  Optimizer optimizer3(fg, ord3, initial);
+  FGConfig actual3 = optimizer3.delta();
+  CHECK(assert_equal(actual3,expected));
+}
+
+/* ************************************************************************* */
+TEST( NonlinearFactorGraph, iterateLM )
+{
+  // really non-linear factor graph
+  NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
+
+  // config far from minimum
+  Vector x0 = Vector_(1, 3.0);
+  boost::shared_ptr<FGConfig> config(new FGConfig);
+  config->insert("x", x0);
+
+  // ordering
+  Ordering ord;
+  ord.push_back("x");
+
+  // create initial optimization state, with lambda=0
+  Optimizer optimizer(fg, ord, config, 0);
+
+  // normal iterate
+  Optimizer iterated1 = optimizer.iterate();
+
+  // LM iterate with lambda 0 should be the same
+  Optimizer iterated2 = optimizer.iterateLM();
+
+  CHECK(assert_equal(iterated1.config(), iterated2.config(), 1e-9));
+}
+
+/* ************************************************************************* */
+TEST( NonlinearFactorGraph, optimize )
+{
+  NonlinearFactorGraph fg = createReallyNonlinearFactorGraph();
+
+  // test error at minimum
+  Vector xstar = Vector_(1, 0.0);
+  FGConfig cstar;
+  cstar.insert("x", xstar);
+  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);
+
+  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
+  Vector x0 = Vector_(1, 3.0);
+  boost::shared_ptr<FGConfig> c0(new FGConfig);
+  c0->insert("x", x0);
+  DOUBLES_EQUAL(199.0,fg.error(*c0),1e-3);
+
+  // optimize parameters
+  Ordering ord;
+  ord.push_back("x");
+  double relativeThreshold = 1e-5;
+  double absoluteThreshold = 1e-5;
+
+  // initial optimization state is the same in both cases tested
+  Optimizer optimizer(fg, ord, c0);
+
+  // Gauss-Newton
+  Optimizer actual1 = optimizer.gaussNewton(relativeThreshold,
+      absoluteThreshold);
+  CHECK(assert_equal(actual1.config(),cstar));
+
+  // Levenberg-Marquardt
+  Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
+      absoluteThreshold, Optimizer::SILENT);
+  CHECK(assert_equal(actual2.config(),cstar));
+}
+
+/* ************************************************************************* */
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
+/* ************************************************************************* */