depend on external CCOLAMD and DenseQR

release/4.3a0
Kai Ni 2010-10-16 01:55:47 +00:00
parent 130d9d2797
commit bc36e653fd
19 changed files with 173 additions and 136 deletions

View File

@@ -18,7 +18,7 @@ SUBLIBS = base/libbase.la geometry/libgeometry.la inference/libinference.la \
linear/liblinear.la nonlinear/libnonlinear.la slam/libslam.la
if USE_LAPACK
-SUBLIBS += -L$(SparseLib) -lcholmod -lspqr
+SUBLIBS += -L$(DenseQRLib) -lDenseQR
endif
# TODO: UFconfig, CCOLAMD, and LDL automake magic without adding or touching any file
@@ -30,7 +30,7 @@ endif
lib_LTLIBRARIES = libgtsam.la
libgtsam_la_SOURCES =
nodist_EXTRA_libgtsam_la_SOURCES = dummy.cxx
-libgtsam_la_LIBADD = $(SUBLIBS) -L$(SparseLib) -lccolamd
+libgtsam_la_LIBADD = $(SUBLIBS) -L$(CCOLAMDLib) -lccolamd
libgtsam_la_LDFLAGS = -no-undefined -version-info 0:0:0
if USE_ACCELERATE_MACOS

View File

@@ -50,16 +50,12 @@ base_HEADERS = $(headers)
noinst_LTLIBRARIES = libbase.la
libbase_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(DenseQRInc) -I$(top_srcdir)/..
if USE_BLAS
AM_CPPFLAGS += -DGT_USE_CBLAS
endif
-if USE_LAPACK
-AM_CPPFLAGS += -DGT_USE_LAPACK
-endif
# On Mac, we compile using the BLAS/LAPACK headers in the Accelerate framework
if USE_ACCELERATE_MACOS
AM_CPPFLAGS += -I/System/Library/Frameworks/vecLib.framework/Headers
@@ -70,7 +66,7 @@ endif
#----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS)
AM_DEFAULT_SOURCE_EXT = .cpp
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(CCOLAMDLib) -lccolamd
LDADD = libbase.la ../CppUnitLite/libCppUnitLite.a
if USE_BLAS_LINUX
@@ -78,7 +74,8 @@ AM_LDFLAGS += -lcblas -latlas
endif
if USE_LAPACK
-AM_LDFLAGS += -L$(SparseLib) -lcholmod -lspqr
+AM_CPPFLAGS += -DGT_USE_LAPACK
+AM_LDFLAGS += -L$(DenseQRLib) -lDenseQR
endif
if USE_LAPACK_LINUX

View File

@@ -22,10 +22,7 @@
#include <gtsam/base/Matrix.h>
#ifdef GT_USE_LAPACK
-extern "C" {
-#include <cholmod.h>
-}
-#include <spqr.hpp>
+#include <spqr_subset.hpp>
namespace gtsam {

View File

@@ -150,24 +150,45 @@ AC_ARG_WITH([boost],
])
AC_SUBST([boost])
-# ask for sparse library include directory
-AC_ARG_WITH([sparse-inc],
-[AS_HELP_STRING([--with-sparse-inc],
-[specify the sparse library include directory (mandatory)])],
-[SparseInc=$withval],
+# ask for ccolamd library include directory
+AC_ARG_WITH([ccolamd-inc],
+[AS_HELP_STRING([--with-ccolamd-inc],
+[specify the CCOLAMD library include directory (mandatory)])],
+[CCOLAMDInc=$withval],
[AC_MSG_FAILURE(
-[--with-sparse-inc has to be specified])
+[--with-ccolamd-inc has to be specified])
])
-AC_SUBST([SparseInc])
+AC_SUBST([CCOLAMDInc])
# ask for sparse library lib directory
-AC_ARG_WITH([sparse-lib],
-[AS_HELP_STRING([--with-sparse-lib],
-[specify the sparse library lib directory (mandatory)])],
-[SparseLib=$withval],
+AC_ARG_WITH([ccolamd-lib],
+[AS_HELP_STRING([--with-ccolamd-lib],
+[specify the CCOLAMD library lib directory (mandatory)])],
+[CCOLAMDLib=$withval],
[AC_MSG_FAILURE(
-[--with-sparse-lib has to be specified])
+[--with-ccolamd-lib has to be specified])
])
-AC_SUBST([SparseLib])
+AC_SUBST([CCOLAMDLib])
+# ask for DenseQR library include directory
+AC_ARG_WITH([denseqr-inc],
+[AS_HELP_STRING([--with-denseqr-inc],
+[specify the DenseQR library include directory (mandatory)])],
+[DenseQRInc=$withval],
+[AC_MSG_FAILURE(
+[--with-denseqr-inc has to be specified])
+])
+AC_SUBST([DenseQRInc])
+# ask for DenseQR library lib directory
+AC_ARG_WITH([denseqr-lib],
+[AS_HELP_STRING([--with-denseqr-lib],
+[specify the DenseQR library lib directory (mandatory)])],
+[DenseQRLib=$withval],
+[AC_MSG_FAILURE(
+[--with-denseqr-lib has to be specified])
+])
+AC_SUBST([DenseQRLib])
AC_OUTPUT
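
All four paths are now mandatory: configure aborts through AC_MSG_FAILURE when any of --with-ccolamd-inc, --with-ccolamd-lib, --with-denseqr-inc, or --with-denseqr-lib is missing, and together they replace the former --with-sparse-inc/--with-sparse-lib pair. A hypothetical invocation (the paths below are placeholders, not part of this commit) would look like ./configure --with-ccolamd-inc=/opt/suitesparse/include --with-ccolamd-lib=/opt/suitesparse/lib --with-denseqr-inc=/opt/denseqr/include --with-denseqr-lib=/opt/denseqr/lib.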

View File

@@ -15,12 +15,12 @@ noinst_PROGRAMS = SimpleRotation # Optimizes a single nonlinear rotation v
noinst_PROGRAMS += PlanarSLAMExample # Solves SLAM example from tutorial by using planarSLAM
noinst_PROGRAMS += Pose2SLAMExample # Solves SLAM example from tutorial by using planarSLAM
noinst_PROGRAMS += PlanarSLAMSelfContained # Solves SLAM example from tutorial with all typedefs in the script
-SUBDIRS = vSLAMexample
+#SUBDIRS = vSLAMexample # does not compile....
#----------------------------------------------------------------------------------------------------
# rules to build local programs
#----------------------------------------------------------------------------------------------------
AM_LDFLAGS = $(BOOST_LDFLAGS)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
LDADD = ../libgtsam.la
AM_DEFAULT_SOURCE_EXT = .cpp

View File

@@ -77,7 +77,7 @@ int main(int argc, char** argv) {
Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer(graph, initialEstimate, solver);
-Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT;
+Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
Optimizer optimizer0 = optimizer.levenbergMarquardt(1e-15, 1e-15, verbosity);
Values result2 = *optimizer0.config();

View File

@@ -70,7 +70,7 @@ int main() {
initialEstimate.print("Initialization");
// create an ordering
-Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::LAMBDA);
+Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::Parameters::LAMBDA);
result->print("Final config");
return 0;

View File

@@ -63,14 +63,14 @@ inferencedir = $(pkgincludedir)/inference
inference_HEADERS = $(headers)
noinst_LTLIBRARIES = libinference.la
libinference_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
AM_CXXFLAGS =
#----------------------------------------------------------------------------------------------------
# rules to build local programs
#----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS)
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
LDADD = libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a
AM_DEFAULT_SOURCE_EXT = .cpp

View File

@@ -51,14 +51,14 @@ lineardir = $(pkgincludedir)/linear
linear_HEADERS = $(headers)
noinst_LTLIBRARIES = liblinear.la
liblinear_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(DenseQRInc) -I$(top_srcdir)/..
AM_CXXFLAGS =
#----------------------------------------------------------------------------------------------------
# rules to build local programs
#----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS)
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
LDADD = liblinear.la ../inference/libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a
AM_DEFAULT_SOURCE_EXT = .cpp
@@ -77,5 +77,6 @@ endif
if USE_LAPACK
AM_CXXFLAGS += -DGT_USE_LAPACK
+AM_LDFLAGS += -L$(DenseQRLib) -lDenseQR
endif

View File

@@ -39,14 +39,14 @@ nonlineardir = $(pkgincludedir)/nonlinear
nonlinear_HEADERS = $(headers)
noinst_LTLIBRARIES = libnonlinear.la
libnonlinear_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
AM_CXXFLAGS =
#----------------------------------------------------------------------------------------------------
# rules to build local programs
#----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS)
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
LDADD = libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a
AM_DEFAULT_SOURCE_EXT = .cpp

View File

@@ -0,0 +1,51 @@
/*
* NonlinearOptimizationParameters.h
*
* Created on: Oct 14, 2010
* Author: nikai
* Description:
*/
#pragma once
namespace gtsam {
// a container for all related parameters
struct NonLinearOptimizerParameters {
typedef enum {
SILENT,
ERROR,
LAMBDA,
TRYLAMBDA,
CONFIG,
DELTA,
TRYCONFIG,
TRYDELTA,
LINEAR,
DAMPED
} verbosityLevel;
typedef enum {
FAST,
BOUNDED,
CAUTIOUS
} LambdaMode;
double absDecrease_; /* threshold for the absolute decrease per iteration */
double relDecrease_; /* threshold for the relative decrease per iteration */
double sumError_; /* threshold for the sum of error */
int maxIterations_ ;
double lambdaFactor_ ;
verbosityLevel verbosity_;
LambdaMode lambdaMode_;
NonLinearOptimizerParameters(): absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
maxIterations_(100), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
NonLinearOptimizerParameters(double absDecrease, double relDecrease, double sumError,
int iIters = 100, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
:absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
maxIterations_(iIters), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
};
}
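
As a usage sketch of the new container (the optimizer object is assumed to be one of the Optimizer typedefs used elsewhere in this commit; the struct fields and the levenbergMarquardt(const NonLinearOptimizerParameters&) overload come from this diff):

// Collect all Levenberg-Marquardt settings in one object. The argument
// order follows the constructor above: absDecrease, relDecrease, sumError,
// maxIterations, lambdaFactor, verbosity, lambdaMode.
NonLinearOptimizerParameters para(1e-15, 1e-15, 0.0, 100, 10.0,
NonLinearOptimizerParameters::SILENT, NonLinearOptimizerParameters::BOUNDED);
// One call replaces the old threshold-by-threshold argument list.
Optimizer optimized = optimizer.levenbergMarquardt(para);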

View File

@@ -100,24 +100,24 @@ namespace gtsam {
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate(
-verbosityLevel verbosity) const {
+Parameters::verbosityLevel verbosity) const {
// linearize and optimize
VectorValues delta = linearizeAndOptimizeForDelta();
// maybe show output
-if (verbosity >= DELTA)
+if (verbosity >= Parameters::DELTA)
delta.print("delta");
// take old config and update it
shared_values newValues(new C(solver_->expmap(*config_, delta)));
// maybe show output
-if (verbosity >= CONFIG)
+if (verbosity >= Parameters::CONFIG)
newValues->print("newValues");
NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, newValues, solver_, lambda_);
-if (verbosity >= ERROR)
+if (verbosity >= Parameters::ERROR)
cout << "error: " << newOptimizer.error_ << endl;
return newOptimizer;
@@ -127,12 +127,12 @@ namespace gtsam {
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::gaussNewton(
double relativeThreshold, double absoluteThreshold,
-verbosityLevel verbosity, int maxIterations) const {
+Parameters::verbosityLevel verbosity, int maxIterations) const {
static W writer(error_);
// check if we're already close enough
if (error_ < absoluteThreshold) {
-if (verbosity >= ERROR) cout << "Exiting, as error = " << error_
+if (verbosity >= Parameters::ERROR) cout << "Exiting, as error = " << error_
<< " < absoluteThreshold (" << absoluteThreshold << ")" << endl;
return *this;
}
@@ -166,19 +166,19 @@ namespace gtsam {
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::try_lambda(
-const L& linear, verbosityLevel verbosity, double factor, LambdaMode lambdaMode) const {
+const L& linear, Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const {
-if (verbosity >= TRYLAMBDA)
+if (verbosity >= Parameters::TRYLAMBDA)
cout << "trying lambda = " << lambda_ << endl;
// add prior-factors
L damped = linear.add_priors(1.0/sqrt(lambda_), GaussianVariableIndex<>(linear));
-if (verbosity >= DAMPED)
+if (verbosity >= Parameters::DAMPED)
damped.print("damped");
// solve
VectorValues delta = solver_->optimize(damped);
-if (verbosity >= TRYDELTA)
+if (verbosity >= Parameters::TRYDELTA)
delta.print("delta");
// update config
@@ -188,9 +188,9 @@ namespace gtsam {
// create new optimization state with more adventurous lambda
NonlinearOptimizer next(graph_, newValues, solver_, lambda_ / factor);
-if (verbosity >= TRYLAMBDA) cout << "next error = " << next.error_ << endl;
+if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;
-if(lambdaMode >= CAUTIOUS) {
+if(lambdaMode >= Parameters::CAUTIOUS) {
throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
}
@@ -198,7 +198,7 @@ namespace gtsam {
// If we're cautious, see if the current lambda is better
// todo: include stopping criterion here?
-if(lambdaMode == CAUTIOUS) {
+if(lambdaMode == Parameters::CAUTIOUS) {
NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_);
if(sameLambda.error_ <= next.error_)
return sameLambda;
@@ -211,7 +211,7 @@ namespace gtsam {
else {
// A more adventurous lambda was worse. If we're cautious, try the same lambda.
-if(lambdaMode == CAUTIOUS) {
+if(lambdaMode == Parameters::CAUTIOUS) {
NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_);
if(sameLambda.error_ <= error_)
return sameLambda;
@@ -222,7 +222,7 @@ namespace gtsam {
// and keep the same config.
// TODO: can we avoid copying the config ?
-if(lambdaMode >= BOUNDED && lambda_ >= 1.0e5) {
+if(lambdaMode >= Parameters::BOUNDED && lambda_ >= 1.0e5) {
return NonlinearOptimizer(graph_, newValues, solver_, lambda_);;
} else {
NonlinearOptimizer cautious(graph_, config_, solver_, lambda_ * factor);
@@ -237,24 +237,24 @@ namespace gtsam {
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterateLM(
-verbosityLevel verbosity, double lambdaFactor, LambdaMode lambdaMode) const {
+Parameters::verbosityLevel verbosity, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {
// maybe show output
-if (verbosity >= CONFIG)
+if (verbosity >= Parameters::CONFIG)
config_->print("config");
-if (verbosity >= ERROR)
+if (verbosity >= Parameters::ERROR)
cout << "error: " << error_ << endl;
-if (verbosity >= LAMBDA)
+if (verbosity >= Parameters::LAMBDA)
cout << "lambda = " << lambda_ << endl;
// linearize all factors once
boost::shared_ptr<L> linear = solver_->linearize(*graph_, *config_);
NonlinearOptimizer prepared(graph_, config_, solver_->prepareLinear(*linear), error_, lambda_);
-if (verbosity >= LINEAR)
+if (verbosity >= Parameters::LINEAR)
linear->print("linear");
// try lambda steps with successively larger lambda until we achieve descent
-if (verbosity >= LAMBDA) cout << "Trying Lambda for the first time" << endl;
+if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl;
return prepared.try_lambda(*linear, verbosity, lambdaFactor, lambdaMode);
}
@@ -262,22 +262,22 @@ namespace gtsam {
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::levenbergMarquardt(
double relativeThreshold, double absoluteThreshold,
-verbosityLevel verbosity, int maxIterations, double lambdaFactor, LambdaMode lambdaMode) const {
+Parameters::verbosityLevel verbosity, int maxIterations, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {
-return levenbergMarquardt(NonLinearOptimizerPara (absoluteThreshold, relativeThreshold, absoluteThreshold,
+return levenbergMarquardt(NonLinearOptimizerParameters (absoluteThreshold, relativeThreshold, absoluteThreshold,
maxIterations, lambdaFactor, verbosity, lambdaMode)) ;
}
template<class G, class C, class L, class S, class W>
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::
-levenbergMarquardt(const NonLinearOptimizerPara &para) const {
+levenbergMarquardt(const NonLinearOptimizerParameters &para) const {
if (para.maxIterations_ <= 0) return *this;
// check if we're already close enough
if (error_ < para.sumError_) {
-if (para.verbosity_ >= ERROR)
+if (para.verbosity_ >= Parameters::ERROR)
cout << "Exiting, as error = " << error_ << " < " << para.sumError_ << endl;
return *this;
}
@@ -299,15 +299,15 @@ namespace gtsam {
// return converged state or iterate
if (converged || para.maxIterations_ <= 1) {
// maybe show output
-if (para.verbosity_ >= CONFIG)
+if (para.verbosity_ >= Parameters::CONFIG)
next.config_->print("final config");
-if (para.verbosity_ >= ERROR)
+if (para.verbosity_ >= Parameters::ERROR)
cout << "final error: " << next.error_ << endl;
-if (para.verbosity_ >= LAMBDA)
+if (para.verbosity_ >= Parameters::LAMBDA)
cout << "final lambda = " << next.lambda_ << endl;
return next;
} else {
-NonLinearOptimizerPara newPara = para ;
+NonLinearOptimizerParameters newPara = para ;
newPara.maxIterations_ = newPara.maxIterations_ - 1;
return next.levenbergMarquardt(newPara) ;
}

View File

@@ -25,6 +25,7 @@
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/linear/Factorization.h>
+#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>
namespace gtsam {
@@ -41,17 +42,28 @@ namespace gtsam {
* until convergence. All methods are functional and return a new state.
*
* The class is parameterized by the Graph type $G$, Values class type $T$,
-* linear system class $L$ and the non linear solver type $S$.
-* the config type is in order to be able to optimize over non-vector values structures.
-* To use in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
+* linear system class $L$, the non linear solver type $S$, and the writer type $W$
+*
+* The values class type $T$ is in order to be able to optimize over non-vector values structures.
+*
+* A nonlinear system solver $S$
+* Concept NonLinearSolver<G,T,L> implements
+* linearize: G * T -> L
+* solve : L -> T
+*
+* The writer $W$ generates output to disk or the screen.
+*
+* For example, in a 2D case, $G$ can be Pose2Graph, $T$ can be Pose2Values,
+* $L$ can be GaussianFactorGraph and $S$ can be Factorization<Pose2Graph, Pose2Values>.
+* The solver class has two main functions: linearize and optimize. The first one
+* linearizes the nonlinear cost function around the current estimate, and the second
+* one optimizes the linearized system using various methods.
+*
+* To use the optimizer in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
+*
+*
*/
-template<class G, class T, class L = GaussianFactorGraph, class S = Factorization<G, T>, class Writer = NullOptimizerWriter>
+template<class G, class T, class L = GaussianFactorGraph, class S = Factorization<G, T>, class W = NullOptimizerWriter>
class NonlinearOptimizer {
public:
@@ -60,49 +72,7 @@ namespace gtsam {
typedef boost::shared_ptr<const G> shared_graph;
typedef boost::shared_ptr<const S> shared_solver;
typedef const S solver;
-typedef enum {
-SILENT,
-ERROR,
-LAMBDA,
-TRYLAMBDA,
-CONFIG,
-DELTA,
-TRYCONFIG,
-TRYDELTA,
-LINEAR,
-DAMPED
-} verbosityLevel;
-typedef enum {
-FAST,
-BOUNDED,
-CAUTIOUS
-} LambdaMode;
-// a container for all related parameters
-struct NonLinearOptimizerPara {
-public:
-double absDecrease_; /* threshold for the absolute decrease per iteration */
-double relDecrease_; /* threshold for the relative decrease per iteration */
-double sumError_; /* threshold for the sum of error */
-int maxIterations_ ;
-double lambdaFactor_ ;
-verbosityLevel verbosity_;
-LambdaMode lambdaMode_;
-public:
-NonLinearOptimizerPara(): absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
-maxIterations_(100), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
-NonLinearOptimizerPara(double absDecrease, double relDecrease, double sumError,
-int iIters = 100, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
-:absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
-maxIterations_(iIters), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
-};
+typedef NonLinearOptimizerParameters Parameters;
private:
@@ -124,7 +94,7 @@ namespace gtsam {
// Recursively try to do tempered Gauss-Newton steps until we succeed
NonlinearOptimizer try_lambda(const L& linear,
-verbosityLevel verbosity, double factor, LambdaMode lambdaMode) const;
+Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const;
public:
@@ -178,7 +148,7 @@ namespace gtsam {
/**
* Do one Gauss-Newton iteration and return next state
*/
-NonlinearOptimizer iterate(verbosityLevel verbosity = SILENT) const;
+NonlinearOptimizer iterate(Parameters::verbosityLevel verbosity = Parameters::SILENT) const;
/**
* Optimize a solution for a non linear factor graph
@@ -188,13 +158,13 @@ namespace gtsam {
*/
NonlinearOptimizer
gaussNewton(double relativeThreshold, double absoluteThreshold,
-verbosityLevel verbosity = SILENT, int maxIterations = 100) const;
+Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100) const;
/**
* One iteration of Levenberg Marquardt
*/
-NonlinearOptimizer iterateLM(verbosityLevel verbosity = SILENT,
-double lambdaFactor = 10, LambdaMode lambdaMode = BOUNDED) const;
+NonlinearOptimizer iterateLM(Parameters::verbosityLevel verbosity = Parameters::SILENT,
+double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
/**
* Optimize using Levenberg-Marquardt. Really Levenberg's
@@ -212,12 +182,12 @@ namespace gtsam {
*/
NonlinearOptimizer
levenbergMarquardt(double relativeThreshold, double absoluteThreshold,
-verbosityLevel verbosity = SILENT, int maxIterations = 100,
-double lambdaFactor = 10, LambdaMode lambdaMode = BOUNDED) const;
+Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100,
+double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;
NonlinearOptimizer
-levenbergMarquardt(const NonLinearOptimizerPara &para) const;
+levenbergMarquardt(const NonLinearOptimizerParameters &para) const;
/**
* Static interface to LM optimization using default ordering and thresholds
@@ -227,7 +197,7 @@ namespace gtsam {
* @return an optimized values structure
*/
static shared_values optimizeLM(shared_graph graph, shared_values config,
-verbosityLevel verbosity = SILENT) {
+Parameters::verbosityLevel verbosity = Parameters::SILENT) {
// Use a variable ordering from COLAMD
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
@@ -248,7 +218,7 @@ namespace gtsam {
* Static interface to LM optimization (no shared_ptr arguments) - see above
*/
inline static shared_values optimizeLM(const G& graph, const T& config,
-verbosityLevel verbosity = SILENT) {
+Parameters::verbosityLevel verbosity = Parameters::SILENT) {
return optimizeLM(boost::make_shared<const G>(graph),
boost::make_shared<const T>(config), verbosity);
}
@@ -261,7 +231,7 @@ namespace gtsam {
* @return an optimized values structure
*/
static shared_values optimizeGN(shared_graph graph, shared_values config,
-verbosityLevel verbosity = SILENT) {
+Parameters::verbosityLevel verbosity = Parameters::SILENT) {
Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
@@ -279,7 +249,7 @@ namespace gtsam {
* Static interface to GN optimization (no shared_ptr arguments) - see above
*/
inline static shared_values optimizeGN(const G& graph, const T& config,
-verbosityLevel verbosity = SILENT) {
+Parameters::verbosityLevel verbosity = Parameters::SILENT) {
return optimizeGN(boost::make_shared<const G>(graph),
boost::make_shared<const T>(config), verbosity);
}
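
The solver concept that the doc comment above calls NonLinearSolver<G,T,L> is never spelled out as code in this commit; the following skeleton is inferred from the four calls made in NonlinearOptimizer-inl.h (solver_->linearize, solver_->optimize, solver_->prepareLinear, solver_->expmap), and the class name MySolver is hypothetical:

// Hypothetical skeleton of a type usable as the solver parameter S.
template<class G, class T, class L>
class MySolver {
public:
// linearize: G * T -> L, returned by shared pointer as iterateLM expects
boost::shared_ptr<L> linearize(const G& graph, const T& config) const;
// solve the (possibly damped) linear system for an update vector
VectorValues optimize(const L& linear) const;
// pre-factor the linear system; the result becomes the optimizer's solver
boost::shared_ptr<const MySolver> prepareLinear(const L& linear) const;
// apply the update delta on the (possibly non-vector) values type T
T expmap(const T& values, const VectorValues& delta) const;
};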

View File

@@ -65,14 +65,14 @@ slamdir = $(pkgincludedir)/slam
slam_HEADERS = $(headers)
noinst_LTLIBRARIES = libslam.la
libslam_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
#----------------------------------------------------------------------------------------------------
# rules to build local programs
#----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS)
AM_DEFAULT_SOURCE_EXT = .cpp
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(CCOLAMDLib) -lccolamd
LDADD = libslam.la ../geometry/libgeometry.la ../nonlinear/libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la
LDADD += ../CppUnitLite/libCppUnitLite.a

View File

@@ -115,7 +115,7 @@ TEST(Pose2Graph, optimize) {
typedef NonlinearOptimizer<Pose2Graph, Pose2Values> Optimizer;
Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer0(fg, initial, solver);
-Optimizer::verbosityLevel verbosity = Optimizer::SILENT;
+Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
//Optimizer::verbosityLevel verbosity = Optimizer::ERROR;
Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);
@@ -155,7 +155,7 @@ TEST(Pose2Graph, optimizeThreePoses) {
// optimize
pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering));
pose2SLAM::Optimizer optimizer0(fg, initial, solver);
-pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT;
+pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);
Pose2Values actual = *optimizer.config();
@@ -199,7 +199,7 @@ TEST(Pose2Graph, optimizeCircle) {
// optimize
pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering));
pose2SLAM::Optimizer optimizer0(fg, initial, solver);
-pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT;
+pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);
Pose2Values actual = *optimizer.config();

View File

@@ -73,7 +73,7 @@ TEST(Pose3Graph, optimizeCircle) {
typedef NonlinearOptimizer<Pose3Graph, Pose3Values> Optimizer;
Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer0(fg, initial, solver);
-Optimizer::verbosityLevel verbosity = Optimizer::SILENT;
+Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
// Optimizer::verbosityLevel verbosity = Optimizer::ERROR;
Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);

View File

@@ -36,7 +36,7 @@ noinst_PROGRAMS = timeGaussianFactorGraph timeLinearOnDataset
# rules to build unit tests
#----------------------------------------------------------------------------------------------------
TESTS = $(check_PROGRAMS)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
AM_LDFLAGS = $(BOOST_LDFLAGS)
if USE_ACCELERATE_MACOS

View File

@@ -200,7 +200,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) {
PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord));
PoseOptimizer optimizer(graph, init, solver);
double relThresh = 1e-5, absThresh = 1e-5;
-PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT);
+PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT);
// verify
PoseValues expected;
@@ -237,7 +237,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) {
PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord));
PoseOptimizer optimizer(graph, init, solver);
double relThresh = 1e-5, absThresh = 1e-5;
-PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT);
+PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT);
// verify
PoseValues expected;

View File

@@ -179,7 +179,7 @@ TEST( NonlinearOptimizer, optimize )
// Levenberg-Marquardt
Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
-absoluteThreshold, Optimizer::SILENT);
+absoluteThreshold, Optimizer::Parameters::SILENT);
DOUBLES_EQUAL(0,fg->error(*(actual2.config())),tol);
}