depend on external CCOLAMD and DenseQR

release/4.3a0
Kai Ni 2010-10-16 01:55:47 +00:00
parent 130d9d2797
commit bc36e653fd
19 changed files with 173 additions and 136 deletions

View File

@ -18,7 +18,7 @@ SUBLIBS = base/libbase.la geometry/libgeometry.la inference/libinference.la \
linear/liblinear.la nonlinear/libnonlinear.la slam/libslam.la linear/liblinear.la nonlinear/libnonlinear.la slam/libslam.la
if USE_LAPACK if USE_LAPACK
SUBLIBS += -L$(SparseLib) -lcholmod -lspqr SUBLIBS += -L$(DenseQRLib) -lDenseQR
endif endif
# TODO: UFconfig, CCOLAMD, and LDL automake magic without adding or touching any file # TODO: UFconfig, CCOLAMD, and LDL automake magic without adding or touching any file
@ -30,7 +30,7 @@ endif
lib_LTLIBRARIES = libgtsam.la lib_LTLIBRARIES = libgtsam.la
libgtsam_la_SOURCES = libgtsam_la_SOURCES =
nodist_EXTRA_libgtsam_la_SOURCES = dummy.cxx nodist_EXTRA_libgtsam_la_SOURCES = dummy.cxx
libgtsam_la_LIBADD = $(SUBLIBS) -L$(SparseLib) -lccolamd libgtsam_la_LIBADD = $(SUBLIBS) -L$(CCOLAMDLib) -lccolamd
libgtsam_la_LDFLAGS = -no-undefined -version-info 0:0:0 libgtsam_la_LDFLAGS = -no-undefined -version-info 0:0:0
if USE_ACCELERATE_MACOS if USE_ACCELERATE_MACOS

View File

@ -50,16 +50,12 @@ base_HEADERS = $(headers)
noinst_LTLIBRARIES = libbase.la noinst_LTLIBRARIES = libbase.la
libbase_la_SOURCES = $(sources) libbase_la_SOURCES = $(sources)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(DenseQRInc) -I$(top_srcdir)/..
if USE_BLAS if USE_BLAS
AM_CPPFLAGS += -DGT_USE_CBLAS AM_CPPFLAGS += -DGT_USE_CBLAS
endif endif
if USE_LAPACK
AM_CPPFLAGS += -DGT_USE_LAPACK
endif
# On Mac, we compile using the BLAS/LAPACK headers in the Accelerate framework # On Mac, we compile using the BLAS/LAPACK headers in the Accelerate framework
if USE_ACCELERATE_MACOS if USE_ACCELERATE_MACOS
AM_CPPFLAGS += -I/System/Library/Frameworks/vecLib.framework/Headers AM_CPPFLAGS += -I/System/Library/Frameworks/vecLib.framework/Headers
@ -70,7 +66,7 @@ endif
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS) TESTS = $(check_PROGRAMS)
AM_DEFAULT_SOURCE_EXT = .cpp AM_DEFAULT_SOURCE_EXT = .cpp
AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(CCOLAMDLib) -lccolamd
LDADD = libbase.la ../CppUnitLite/libCppUnitLite.a LDADD = libbase.la ../CppUnitLite/libCppUnitLite.a
if USE_BLAS_LINUX if USE_BLAS_LINUX
@ -78,7 +74,8 @@ AM_LDFLAGS += -lcblas -latlas
endif endif
if USE_LAPACK if USE_LAPACK
AM_LDFLAGS += -L$(SparseLib) -lcholmod -lspqr AM_CPPFLAGS += -DGT_USE_LAPACK
AM_LDFLAGS += -L$(DenseQRLib) -lDenseQR
endif endif
if USE_LAPACK_LINUX if USE_LAPACK_LINUX

View File

@ -22,10 +22,7 @@
#include <gtsam/base/Matrix.h> #include <gtsam/base/Matrix.h>
#ifdef GT_USE_LAPACK #ifdef GT_USE_LAPACK
extern "C" { #include <spqr_subset.hpp>
#include <cholmod.h>
}
#include <spqr.hpp>
namespace gtsam { namespace gtsam {

View File

@ -150,24 +150,45 @@ AC_ARG_WITH([boost],
]) ])
AC_SUBST([boost]) AC_SUBST([boost])
# ask for sparse library include directory # ask for ccolamd library include directory
AC_ARG_WITH([sparse-inc], AC_ARG_WITH([ccolamd-inc],
[AS_HELP_STRING([--with-sparse-inc], [AS_HELP_STRING([--with-ccolamd-inc],
[specify the sparse library include directory (mandatory)])], [specify the CCOLAMD library include directory (mandatory)])],
[SparseInc=$withval], [CCOLAMDInc=$withval],
[AC_MSG_FAILURE( [AC_MSG_FAILURE(
[--with-sparse-inc has to be specified]) [--with-ccolamd-inc has to be specified])
]) ])
AC_SUBST([SparseInc]) AC_SUBST([CCOLAMDInc])
# ask for sparse library lib directory # ask for sparse library lib directory
AC_ARG_WITH([sparse-lib], AC_ARG_WITH([ccolamd-lib],
[AS_HELP_STRING([--with-sparse-lib], [AS_HELP_STRING([--with-ccolamd-lib],
[specify the sparse library lib directory (mandatory)])], [specify the CCOLAMD library lib directory (mandatory)])],
[SparseLib=$withval], [CCOLAMDLib=$withval],
[AC_MSG_FAILURE( [AC_MSG_FAILURE(
[--with-sparse-lib has to be specified]) [--with-ccolamd-lib has to be specified])
]) ])
AC_SUBST([SparseLib]) AC_SUBST([CCOLAMDLib])
# ask for DenseQR library include directory
AC_ARG_WITH([denseqr-inc],
[AS_HELP_STRING([--with-denseqr-inc],
[specify the DenseQR library include directory (mandatory)])],
[DenseQRInc=$withval],
[AC_MSG_FAILURE(
[--with-denseqr-inc has to be specified])
])
AC_SUBST([DenseQRInc])
# ask for DenseQR library lib directory
AC_ARG_WITH([denseqr-lib],
[AS_HELP_STRING([--with-denseqr-lib],
[specify the DenseQR library lib directory (mandatory)])],
[DenseQRLib=$withval],
[AC_MSG_FAILURE(
[--with-denseqr-lib has to be specified])
])
AC_SUBST([DenseQRLib])
AC_OUTPUT AC_OUTPUT

View File

@ -15,12 +15,12 @@ noinst_PROGRAMS = SimpleRotation # Optimizes a single nonlinear rotation v
noinst_PROGRAMS += PlanarSLAMExample # Solves SLAM example from tutorial by using planarSLAM noinst_PROGRAMS += PlanarSLAMExample # Solves SLAM example from tutorial by using planarSLAM
noinst_PROGRAMS += Pose2SLAMExample # Solves SLAM example from tutorial by using planarSLAM noinst_PROGRAMS += Pose2SLAMExample # Solves SLAM example from tutorial by using planarSLAM
noinst_PROGRAMS += PlanarSLAMSelfContained # Solves SLAM example from tutorial with all typedefs in the script noinst_PROGRAMS += PlanarSLAMSelfContained # Solves SLAM example from tutorial with all typedefs in the script
SUBDIRS = vSLAMexample #SUBDIRS = vSLAMexample # does not compile....
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
# rules to build local programs # rules to build local programs
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
AM_LDFLAGS = $(BOOST_LDFLAGS) AM_LDFLAGS = $(BOOST_LDFLAGS)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
LDADD = ../libgtsam.la LDADD = ../libgtsam.la
AM_DEFAULT_SOURCE_EXT = .cpp AM_DEFAULT_SOURCE_EXT = .cpp

View File

@ -77,7 +77,7 @@ int main(int argc, char** argv) {
Optimizer::shared_solver solver(new Optimizer::solver(ordering)); Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer(graph, initialEstimate, solver); Optimizer optimizer(graph, initialEstimate, solver);
Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT; Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
Optimizer optimizer0 = optimizer.levenbergMarquardt(1e-15, 1e-15, verbosity); Optimizer optimizer0 = optimizer.levenbergMarquardt(1e-15, 1e-15, verbosity);
Values result2 = *optimizer0.config(); Values result2 = *optimizer0.config();

View File

@ -70,7 +70,7 @@ int main() {
initialEstimate.print("Initialization"); initialEstimate.print("Initialization");
// create an ordering // create an ordering
Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::LAMBDA); Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::Parameters::LAMBDA);
result->print("Final config"); result->print("Final config");
return 0; return 0;

View File

@ -63,14 +63,14 @@ inferencedir = $(pkgincludedir)/inference
inference_HEADERS = $(headers) inference_HEADERS = $(headers)
noinst_LTLIBRARIES = libinference.la noinst_LTLIBRARIES = libinference.la
libinference_la_SOURCES = $(sources) libinference_la_SOURCES = $(sources)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
AM_CXXFLAGS = AM_CXXFLAGS =
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
# rules to build local programs # rules to build local programs
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS) TESTS = $(check_PROGRAMS)
AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
LDADD = libinference.la ../base/libbase.la LDADD = libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a LDADD += ../CppUnitLite/libCppUnitLite.a
AM_DEFAULT_SOURCE_EXT = .cpp AM_DEFAULT_SOURCE_EXT = .cpp

View File

@ -51,14 +51,14 @@ lineardir = $(pkgincludedir)/linear
linear_HEADERS = $(headers) linear_HEADERS = $(headers)
noinst_LTLIBRARIES = liblinear.la noinst_LTLIBRARIES = liblinear.la
liblinear_la_SOURCES = $(sources) liblinear_la_SOURCES = $(sources)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(DenseQRInc) -I$(top_srcdir)/..
AM_CXXFLAGS = AM_CXXFLAGS =
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
# rules to build local programs # rules to build local programs
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS) TESTS = $(check_PROGRAMS)
AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
LDADD = liblinear.la ../inference/libinference.la ../base/libbase.la LDADD = liblinear.la ../inference/libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a LDADD += ../CppUnitLite/libCppUnitLite.a
AM_DEFAULT_SOURCE_EXT = .cpp AM_DEFAULT_SOURCE_EXT = .cpp
@ -77,5 +77,6 @@ endif
if USE_LAPACK if USE_LAPACK
AM_CXXFLAGS += -DGT_USE_LAPACK AM_CXXFLAGS += -DGT_USE_LAPACK
AM_LDFLAGS += -L$(DenseQRLib) -lDenseQR
endif endif

View File

@ -39,14 +39,14 @@ nonlineardir = $(pkgincludedir)/nonlinear
nonlinear_HEADERS = $(headers) nonlinear_HEADERS = $(headers)
noinst_LTLIBRARIES = libnonlinear.la noinst_LTLIBRARIES = libnonlinear.la
libnonlinear_la_SOURCES = $(sources) libnonlinear_la_SOURCES = $(sources)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
AM_CXXFLAGS = AM_CXXFLAGS =
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
# rules to build local programs # rules to build local programs
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS) TESTS = $(check_PROGRAMS)
AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
LDADD = libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la LDADD = libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a LDADD += ../CppUnitLite/libCppUnitLite.a
AM_DEFAULT_SOURCE_EXT = .cpp AM_DEFAULT_SOURCE_EXT = .cpp

View File

@ -0,0 +1,51 @@
/*
 * NonlinearOptimizationParameters.h
 *
 * Created on: Oct 14, 2010
 * Author: nikai
 * Description: one container bundling every tunable knob of the nonlinear optimizers
 */
#pragma once
namespace gtsam {

	/// Groups all parameters that steer NonlinearOptimizer in a single value type,
	/// so call sites can pass one object instead of a long argument list.
	struct NonLinearOptimizerParameters {

		/// Output level requested from the optimizer, ordered from quietest to most verbose.
		typedef enum {
			SILENT,
			ERROR,
			LAMBDA,
			TRYLAMBDA,
			CONFIG,
			DELTA,
			TRYCONFIG,
			TRYDELTA,
			LINEAR,
			DAMPED
		} verbosityLevel;

		/// Policy used by Levenberg-Marquardt when adjusting the damping factor lambda.
		typedef enum {
			FAST,
			BOUNDED,
			CAUTIOUS
		} LambdaMode;

		double absDecrease_;       /* threshold for the absolute decrease per iteration */
		double relDecrease_;       /* threshold for the relative decrease per iteration */
		double sumError_;          /* threshold for the sum of error */
		int maxIterations_;        /* upper bound on the number of iterations */
		double lambdaFactor_;      /* multiplicative step applied to lambda between attempts */
		verbosityLevel verbosity_; /* requested output level */
		LambdaMode lambdaMode_;    /* lambda adjustment policy */

		/// Default constructor: the stock settings (100 iterations, ERROR verbosity,
		/// BOUNDED lambda handling).
		NonLinearOptimizerParameters() :
				absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
				maxIterations_(100), lambdaFactor_(10.0),
				verbosity_(ERROR), lambdaMode_(BOUNDED) {
		}

		/// Full constructor: explicit convergence thresholds, with the remaining
		/// knobs defaulting to the same values as the default constructor.
		NonLinearOptimizerParameters(double absDecrease, double relDecrease, double sumError,
				int iIters = 100, double lambdaFactor = 10,
				verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED) :
				absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
				maxIterations_(iIters), lambdaFactor_(lambdaFactor),
				verbosity_(v), lambdaMode_(lambdaMode) {
		}
	};
}

View File

@ -100,24 +100,24 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
template<class G, class C, class L, class S, class W> template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate( NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate(
verbosityLevel verbosity) const { Parameters::verbosityLevel verbosity) const {
// linearize and optimize // linearize and optimize
VectorValues delta = linearizeAndOptimizeForDelta(); VectorValues delta = linearizeAndOptimizeForDelta();
// maybe show output // maybe show output
if (verbosity >= DELTA) if (verbosity >= Parameters::DELTA)
delta.print("delta"); delta.print("delta");
// take old config and update it // take old config and update it
shared_values newValues(new C(solver_->expmap(*config_, delta))); shared_values newValues(new C(solver_->expmap(*config_, delta)));
// maybe show output // maybe show output
if (verbosity >= CONFIG) if (verbosity >= Parameters::CONFIG)
newValues->print("newValues"); newValues->print("newValues");
NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, newValues, solver_, lambda_); NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, newValues, solver_, lambda_);
if (verbosity >= ERROR) if (verbosity >= Parameters::ERROR)
cout << "error: " << newOptimizer.error_ << endl; cout << "error: " << newOptimizer.error_ << endl;
return newOptimizer; return newOptimizer;
@ -127,12 +127,12 @@ namespace gtsam {
template<class G, class C, class L, class S, class W> template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::gaussNewton( NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::gaussNewton(
double relativeThreshold, double absoluteThreshold, double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations) const { Parameters::verbosityLevel verbosity, int maxIterations) const {
static W writer(error_); static W writer(error_);
// check if we're already close enough // check if we're already close enough
if (error_ < absoluteThreshold) { if (error_ < absoluteThreshold) {
if (verbosity >= ERROR) cout << "Exiting, as error = " << error_ if (verbosity >= Parameters::ERROR) cout << "Exiting, as error = " << error_
<< " < absoluteThreshold (" << absoluteThreshold << ")" << endl; << " < absoluteThreshold (" << absoluteThreshold << ")" << endl;
return *this; return *this;
} }
@ -166,19 +166,19 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
template<class G, class C, class L, class S, class W> template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda( NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda(
const L& linear, verbosityLevel verbosity, double factor, LambdaMode lambdaMode) const { const L& linear, Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const {
if (verbosity >= TRYLAMBDA) if (verbosity >= Parameters::TRYLAMBDA)
cout << "trying lambda = " << lambda_ << endl; cout << "trying lambda = " << lambda_ << endl;
// add prior-factors // add prior-factors
L damped = linear.add_priors(1.0/sqrt(lambda_), GaussianVariableIndex<>(linear)); L damped = linear.add_priors(1.0/sqrt(lambda_), GaussianVariableIndex<>(linear));
if (verbosity >= DAMPED) if (verbosity >= Parameters::DAMPED)
damped.print("damped"); damped.print("damped");
// solve // solve
VectorValues delta = solver_->optimize(damped); VectorValues delta = solver_->optimize(damped);
if (verbosity >= TRYDELTA) if (verbosity >= Parameters::TRYDELTA)
delta.print("delta"); delta.print("delta");
// update config // update config
@ -188,9 +188,9 @@ namespace gtsam {
// create new optimization state with more adventurous lambda // create new optimization state with more adventurous lambda
NonlinearOptimizer next(graph_, newValues, solver_, lambda_ / factor); NonlinearOptimizer next(graph_, newValues, solver_, lambda_ / factor);
if (verbosity >= TRYLAMBDA) cout << "next error = " << next.error_ << endl; if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;
if(lambdaMode >= CAUTIOUS) { if(lambdaMode >= Parameters::CAUTIOUS) {
throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED."); throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
} }
@ -198,7 +198,7 @@ namespace gtsam {
// If we're cautious, see if the current lambda is better // If we're cautious, see if the current lambda is better
// todo: include stopping criterion here? // todo: include stopping criterion here?
if(lambdaMode == CAUTIOUS) { if(lambdaMode == Parameters::CAUTIOUS) {
NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_); NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_);
if(sameLambda.error_ <= next.error_) if(sameLambda.error_ <= next.error_)
return sameLambda; return sameLambda;
@ -211,7 +211,7 @@ namespace gtsam {
else { else {
// A more adventurous lambda was worse. If we're cautious, try the same lambda. // A more adventurous lambda was worse. If we're cautious, try the same lambda.
if(lambdaMode == CAUTIOUS) { if(lambdaMode == Parameters::CAUTIOUS) {
NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_); NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_);
if(sameLambda.error_ <= error_) if(sameLambda.error_ <= error_)
return sameLambda; return sameLambda;
@ -222,7 +222,7 @@ namespace gtsam {
// and keep the same config. // and keep the same config.
// TODO: can we avoid copying the config ? // TODO: can we avoid copying the config ?
if(lambdaMode >= BOUNDED && lambda_ >= 1.0e5) { if(lambdaMode >= Parameters::BOUNDED && lambda_ >= 1.0e5) {
return NonlinearOptimizer(graph_, newValues, solver_, lambda_);; return NonlinearOptimizer(graph_, newValues, solver_, lambda_);;
} else { } else {
NonlinearOptimizer cautious(graph_, config_, solver_, lambda_ * factor); NonlinearOptimizer cautious(graph_, config_, solver_, lambda_ * factor);
@ -237,24 +237,24 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
template<class G, class C, class L, class S, class W> template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM( NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM(
verbosityLevel verbosity, double lambdaFactor, LambdaMode lambdaMode) const { Parameters::verbosityLevel verbosity, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {
// maybe show output // maybe show output
if (verbosity >= CONFIG) if (verbosity >= Parameters::CONFIG)
config_->print("config"); config_->print("config");
if (verbosity >= ERROR) if (verbosity >= Parameters::ERROR)
cout << "error: " << error_ << endl; cout << "error: " << error_ << endl;
if (verbosity >= LAMBDA) if (verbosity >= Parameters::LAMBDA)
cout << "lambda = " << lambda_ << endl; cout << "lambda = " << lambda_ << endl;
// linearize all factors once // linearize all factors once
boost::shared_ptr<L> linear = solver_->linearize(*graph_, *config_); boost::shared_ptr<L> linear = solver_->linearize(*graph_, *config_);
NonlinearOptimizer prepared(graph_, config_, solver_->prepareLinear(*linear), error_, lambda_); NonlinearOptimizer prepared(graph_, config_, solver_->prepareLinear(*linear), error_, lambda_);
if (verbosity >= LINEAR) if (verbosity >= Parameters::LINEAR)
linear->print("linear"); linear->print("linear");
// try lambda steps with successively larger lambda until we achieve descent // try lambda steps with successively larger lambda until we achieve descent
if (verbosity >= LAMBDA) cout << "Trying Lambda for the first time" << endl; if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl;
return prepared.try_lambda(*linear, verbosity, lambdaFactor, lambdaMode); return prepared.try_lambda(*linear, verbosity, lambdaFactor, lambdaMode);
} }
@ -262,22 +262,22 @@ namespace gtsam {
template<class G, class C, class L, class S, class W> template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt( NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt(
double relativeThreshold, double absoluteThreshold, double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations, double lambdaFactor, LambdaMode lambdaMode) const { Parameters::verbosityLevel verbosity, int maxIterations, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {
return levenbergMarquardt(NonLinearOptimizerPara (absoluteThreshold, relativeThreshold, absoluteThreshold, return levenbergMarquardt(NonLinearOptimizerParameters (absoluteThreshold, relativeThreshold, absoluteThreshold,
maxIterations, lambdaFactor, verbosity, lambdaMode)) ; maxIterations, lambdaFactor, verbosity, lambdaMode)) ;
} }
template<class G, class C, class L, class S, class W> template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>:: NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::
levenbergMarquardt(const NonLinearOptimizerPara &para) const { levenbergMarquardt(const NonLinearOptimizerParameters &para) const {
if (para.maxIterations_ <= 0) return *this; if (para.maxIterations_ <= 0) return *this;
// check if we're already close enough // check if we're already close enough
if (error_ < para.sumError_) { if (error_ < para.sumError_) {
if (para.verbosity_ >= ERROR) if (para.verbosity_ >= Parameters::ERROR)
cout << "Exiting, as error = " << error_ << " < " << para.sumError_ << endl; cout << "Exiting, as error = " << error_ << " < " << para.sumError_ << endl;
return *this; return *this;
} }
@ -299,15 +299,15 @@ namespace gtsam {
// return converged state or iterate // return converged state or iterate
if (converged || para.maxIterations_ <= 1) { if (converged || para.maxIterations_ <= 1) {
// maybe show output // maybe show output
if (para.verbosity_ >= CONFIG) if (para.verbosity_ >= Parameters::CONFIG)
next.config_->print("final config"); next.config_->print("final config");
if (para.verbosity_ >= ERROR) if (para.verbosity_ >= Parameters::ERROR)
cout << "final error: " << next.error_ << endl; cout << "final error: " << next.error_ << endl;
if (para.verbosity_ >= LAMBDA) if (para.verbosity_ >= Parameters::LAMBDA)
cout << "final lambda = " << next.lambda_ << endl; cout << "final lambda = " << next.lambda_ << endl;
return next; return next;
} else { } else {
NonLinearOptimizerPara newPara = para ; NonLinearOptimizerParameters newPara = para ;
newPara.maxIterations_ = newPara.maxIterations_ - 1; newPara.maxIterations_ = newPara.maxIterations_ - 1;
return next.levenbergMarquardt(newPara) ; return next.levenbergMarquardt(newPara) ;
} }

View File

@ -25,6 +25,7 @@
#include <gtsam/linear/VectorValues.h> #include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h> #include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/linear/Factorization.h> #include <gtsam/linear/Factorization.h>
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>
namespace gtsam { namespace gtsam {
@ -41,17 +42,28 @@ namespace gtsam {
* until convergence. All methods are functional and return a new state. * until convergence. All methods are functional and return a new state.
* *
* The class is parameterized by the Graph type $G$, Values class type $T$, * The class is parameterized by the Graph type $G$, Values class type $T$,
* linear system class $L$ and the non linear solver type $S$. * linear system class $L$, the non linear solver type $S$, and the writer type $W$
* the config type is in order to be able to optimize over non-vector values structures. *
* To use in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file * The values class type $T$ is in order to be able to optimize over non-vector values structures.
*
* A nonlinear system solver $S$
* Concept NonLinearSolver<G,T,L> implements
* linearize: G * T -> L
* solve : L -> T
*
* The writer $W$ generates output to disk or the screen.
* *
* For example, in a 2D case, $G$ can be Pose2Graph, $T$ can be Pose2Values, * For example, in a 2D case, $G$ can be Pose2Graph, $T$ can be Pose2Values,
* $L$ can be GaussianFactorGraph and $S$ can be Factorization<Pose2Graph, Pose2Values>. * $L$ can be GaussianFactorGraph and $S$ can be Factorization<Pose2Graph, Pose2Values>.
* The solver class has two main functions: linearize and optimize. The first one * The solver class has two main functions: linearize and optimize. The first one
* linearizes the nonlinear cost function around the current estimate, and the second * linearizes the nonlinear cost function around the current estimate, and the second
* one optimizes the linearized system using various methods. * one optimizes the linearized system using various methods.
*
* To use the optimizer in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
*
*
*/ */
template<class G, class T, class L = GaussianFactorGraph, class S = Factorization<G, T>, class Writer = NullOptimizerWriter> template<class G, class T, class L = GaussianFactorGraph, class S = Factorization<G, T>, class W = NullOptimizerWriter>
class NonlinearOptimizer { class NonlinearOptimizer {
public: public:
@ -60,49 +72,7 @@ namespace gtsam {
typedef boost::shared_ptr<const G> shared_graph; typedef boost::shared_ptr<const G> shared_graph;
typedef boost::shared_ptr<const S> shared_solver; typedef boost::shared_ptr<const S> shared_solver;
typedef const S solver; typedef const S solver;
typedef NonLinearOptimizerParameters Parameters;
typedef enum {
SILENT,
ERROR,
LAMBDA,
TRYLAMBDA,
CONFIG,
DELTA,
TRYCONFIG,
TRYDELTA,
LINEAR,
DAMPED
} verbosityLevel;
typedef enum {
FAST,
BOUNDED,
CAUTIOUS
} LambdaMode;
// a container for all related parameters
struct NonLinearOptimizerPara {
public:
double absDecrease_; /* threshold for the absolute decrease per iteration */
double relDecrease_; /* threshold for the relative decrease per iteration */
double sumError_; /* threshold for the sum of error */
int maxIterations_ ;
double lambdaFactor_ ;
verbosityLevel verbosity_;
LambdaMode lambdaMode_;
public:
NonLinearOptimizerPara(): absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
maxIterations_(100), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
NonLinearOptimizerPara(double absDecrease, double relDecrease, double sumError,
int iIters = 100, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
:absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
maxIterations_(iIters), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
};
private: private:
@ -124,7 +94,7 @@ namespace gtsam {
// Recursively try to do tempered Gauss-Newton steps until we succeed // Recursively try to do tempered Gauss-Newton steps until we succeed
NonlinearOptimizer try_lambda(const L& linear, NonlinearOptimizer try_lambda(const L& linear,
verbosityLevel verbosity, double factor, LambdaMode lambdaMode) const; Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const;
public: public:
@ -178,7 +148,7 @@ namespace gtsam {
/** /**
* Do one Gauss-Newton iteration and return next state * Do one Gauss-Newton iteration and return next state
*/ */
NonlinearOptimizer iterate(verbosityLevel verbosity = SILENT) const; NonlinearOptimizer iterate(Parameters::verbosityLevel verbosity = Parameters::SILENT) const;
/** /**
* Optimize a solution for a non linear factor graph * Optimize a solution for a non linear factor graph
@ -188,13 +158,13 @@ namespace gtsam {
*/ */
NonlinearOptimizer NonlinearOptimizer
gaussNewton(double relativeThreshold, double absoluteThreshold, gaussNewton(double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity = SILENT, int maxIterations = 100) const; Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100) const;
/** /**
* One iteration of Levenberg Marquardt * One iteration of Levenberg Marquardt
*/ */
NonlinearOptimizer iterateLM(verbosityLevel verbosity = SILENT, NonlinearOptimizer iterateLM(Parameters::verbosityLevel verbosity = Parameters::SILENT,
double lambdaFactor = 10, LambdaMode lambdaMode = BOUNDED) const; double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
/** /**
* Optimize using Levenberg-Marquardt. Really Levenberg's * Optimize using Levenberg-Marquardt. Really Levenberg's
@ -212,12 +182,12 @@ namespace gtsam {
*/ */
NonlinearOptimizer NonlinearOptimizer
levenbergMarquardt(double relativeThreshold, double absoluteThreshold, levenbergMarquardt(double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity = SILENT, int maxIterations = 100, Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100,
double lambdaFactor = 10, LambdaMode lambdaMode = BOUNDED) const; double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
NonlinearOptimizer NonlinearOptimizer
levenbergMarquardt(const NonLinearOptimizerPara &para) const; levenbergMarquardt(const NonLinearOptimizerParameters &para) const;
/** /**
* Static interface to LM optimization using default ordering and thresholds * Static interface to LM optimization using default ordering and thresholds
@ -227,7 +197,7 @@ namespace gtsam {
* @return an optimized values structure * @return an optimized values structure
*/ */
static shared_values optimizeLM(shared_graph graph, shared_values config, static shared_values optimizeLM(shared_graph graph, shared_values config,
verbosityLevel verbosity = SILENT) { Parameters::verbosityLevel verbosity = Parameters::SILENT) {
// Use a variable ordering from COLAMD // Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config); Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
@ -248,7 +218,7 @@ namespace gtsam {
* Static interface to LM optimization (no shared_ptr arguments) - see above * Static interface to LM optimization (no shared_ptr arguments) - see above
*/ */
inline static shared_values optimizeLM(const G& graph, const T& config, inline static shared_values optimizeLM(const G& graph, const T& config,
verbosityLevel verbosity = SILENT) { Parameters::verbosityLevel verbosity = Parameters::SILENT) {
return optimizeLM(boost::make_shared<const G>(graph), return optimizeLM(boost::make_shared<const G>(graph),
boost::make_shared<const T>(config), verbosity); boost::make_shared<const T>(config), verbosity);
} }
@ -261,7 +231,7 @@ namespace gtsam {
* @return an optimized values structure * @return an optimized values structure
*/ */
static shared_values optimizeGN(shared_graph graph, shared_values config, static shared_values optimizeGN(shared_graph graph, shared_values config,
verbosityLevel verbosity = SILENT) { Parameters::verbosityLevel verbosity = Parameters::SILENT) {
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config); Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
double relativeThreshold = 1e-5, absoluteThreshold = 1e-5; double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
@ -279,7 +249,7 @@ namespace gtsam {
* Static interface to GN optimization (no shared_ptr arguments) - see above * Static interface to GN optimization (no shared_ptr arguments) - see above
*/ */
inline static shared_values optimizeGN(const G& graph, const T& config, inline static shared_values optimizeGN(const G& graph, const T& config,
verbosityLevel verbosity = SILENT) { Parameters::verbosityLevel verbosity = Parameters::SILENT) {
return optimizeGN(boost::make_shared<const G>(graph), return optimizeGN(boost::make_shared<const G>(graph),
boost::make_shared<const T>(config), verbosity); boost::make_shared<const T>(config), verbosity);
} }

View File

@ -65,14 +65,14 @@ slamdir = $(pkgincludedir)/slam
slam_HEADERS = $(headers) slam_HEADERS = $(headers)
noinst_LTLIBRARIES = libslam.la noinst_LTLIBRARIES = libslam.la
libslam_la_SOURCES = $(sources) libslam_la_SOURCES = $(sources)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
# rules to build local programs # rules to build local programs
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS) TESTS = $(check_PROGRAMS)
AM_DEFAULT_SOURCE_EXT = .cpp AM_DEFAULT_SOURCE_EXT = .cpp
AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(CCOLAMDLib) -lccolamd
LDADD = libslam.la ../geometry/libgeometry.la ../nonlinear/libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la LDADD = libslam.la ../geometry/libgeometry.la ../nonlinear/libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a LDADD += ../CppUnitLite/libCppUnitLite.a

View File

@ -115,7 +115,7 @@ TEST(Pose2Graph, optimize) {
typedef NonlinearOptimizer<Pose2Graph, Pose2Values> Optimizer; typedef NonlinearOptimizer<Pose2Graph, Pose2Values> Optimizer;
Optimizer::shared_solver solver(new Optimizer::solver(ordering)); Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer0(fg, initial, solver); Optimizer optimizer0(fg, initial, solver);
Optimizer::verbosityLevel verbosity = Optimizer::SILENT; Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
//Optimizer::verbosityLevel verbosity = Optimizer::ERROR; //Optimizer::verbosityLevel verbosity = Optimizer::ERROR;
Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);
@ -155,7 +155,7 @@ TEST(Pose2Graph, optimizeThreePoses) {
// optimize // optimize
pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering)); pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering));
pose2SLAM::Optimizer optimizer0(fg, initial, solver); pose2SLAM::Optimizer optimizer0(fg, initial, solver);
pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT; pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);
Pose2Values actual = *optimizer.config(); Pose2Values actual = *optimizer.config();
@ -199,7 +199,7 @@ TEST(Pose2Graph, optimizeCircle) {
// optimize // optimize
pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering)); pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering));
pose2SLAM::Optimizer optimizer0(fg, initial, solver); pose2SLAM::Optimizer optimizer0(fg, initial, solver);
pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT; pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);
Pose2Values actual = *optimizer.config(); Pose2Values actual = *optimizer.config();

View File

@ -73,7 +73,7 @@ TEST(Pose3Graph, optimizeCircle) {
typedef NonlinearOptimizer<Pose3Graph, Pose3Values> Optimizer; typedef NonlinearOptimizer<Pose3Graph, Pose3Values> Optimizer;
Optimizer::shared_solver solver(new Optimizer::solver(ordering)); Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer0(fg, initial, solver); Optimizer optimizer0(fg, initial, solver);
Optimizer::verbosityLevel verbosity = Optimizer::SILENT; Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
// Optimizer::verbosityLevel verbosity = Optimizer::ERROR; // Optimizer::verbosityLevel verbosity = Optimizer::ERROR;
Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);

View File

@ -36,7 +36,7 @@ noinst_PROGRAMS = timeGaussianFactorGraph timeLinearOnDataset
# rules to build unit tests # rules to build unit tests
#---------------------------------------------------------------------------------------------------- #----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS) TESTS = $(check_PROGRAMS)
AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/.. AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
AM_LDFLAGS = $(BOOST_LDFLAGS) AM_LDFLAGS = $(BOOST_LDFLAGS)
if USE_ACCELERATE_MACOS if USE_ACCELERATE_MACOS

View File

@ -200,7 +200,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) {
PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord)); PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord));
PoseOptimizer optimizer(graph, init, solver); PoseOptimizer optimizer(graph, init, solver);
double relThresh = 1e-5, absThresh = 1e-5; double relThresh = 1e-5, absThresh = 1e-5;
PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT); PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT);
// verify // verify
PoseValues expected; PoseValues expected;
@ -237,7 +237,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) {
PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord)); PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord));
PoseOptimizer optimizer(graph, init, solver); PoseOptimizer optimizer(graph, init, solver);
double relThresh = 1e-5, absThresh = 1e-5; double relThresh = 1e-5, absThresh = 1e-5;
PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT); PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT);
// verify // verify
PoseValues expected; PoseValues expected;

View File

@ -179,7 +179,7 @@ TEST( NonlinearOptimizer, optimize )
// Levenberg-Marquardt // Levenberg-Marquardt
Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold, Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
absoluteThreshold, Optimizer::SILENT); absoluteThreshold, Optimizer::Parameters::SILENT);
DOUBLES_EQUAL(0,fg->error(*(actual2.config())),tol); DOUBLES_EQUAL(0,fg->error(*(actual2.config())),tol);
} }