diff --git a/Makefile.am b/Makefile.am
index 0d9b8c8f6..12259909b 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -18,7 +18,7 @@ SUBLIBS = base/libbase.la geometry/libgeometry.la inference/libinference.la \
 	linear/liblinear.la nonlinear/libnonlinear.la slam/libslam.la

 if USE_LAPACK
-SUBLIBS += -L$(SparseLib) -lcholmod -lspqr
+SUBLIBS += -L$(DenseQRLib) -lDenseQR
 endif

 # TODO: UFconfig, CCOLAMD, and LDL automake magic without adding or touching any file
@@ -30,7 +30,7 @@ endif
 lib_LTLIBRARIES = libgtsam.la
 libgtsam_la_SOURCES =
 nodist_EXTRA_libgtsam_la_SOURCES = dummy.cxx
-libgtsam_la_LIBADD = $(SUBLIBS) -L$(SparseLib) -lccolamd
+libgtsam_la_LIBADD = $(SUBLIBS) -L$(CCOLAMDLib) -lccolamd
 libgtsam_la_LDFLAGS = -no-undefined -version-info 0:0:0

 if USE_ACCELERATE_MACOS
diff --git a/base/Makefile.am b/base/Makefile.am
index 9e3770fc8..9745ae33d 100644
--- a/base/Makefile.am
+++ b/base/Makefile.am
@@ -50,16 +50,12 @@ base_HEADERS = $(headers)
 noinst_LTLIBRARIES = libbase.la
 libbase_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(DenseQRInc) -I$(top_srcdir)/..

 if USE_BLAS
 AM_CPPFLAGS += -DGT_USE_CBLAS
 endif

-if USE_LAPACK
-AM_CPPFLAGS += -DGT_USE_LAPACK
-endif
-
 # On Mac, we compile using the BLAS/LAPACK headers in the Accelerate framework
 if USE_ACCELERATE_MACOS
 AM_CPPFLAGS += -I/System/Library/Frameworks/vecLib.framework/Headers
@@ -70,7 +66,7 @@ endif
 #----------------------------------------------------------------------------------------------------
 TESTS = $(check_PROGRAMS)
 AM_DEFAULT_SOURCE_EXT = .cpp
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(CCOLAMDLib) -lccolamd
 LDADD = libbase.la ../CppUnitLite/libCppUnitLite.a

 if USE_BLAS_LINUX
@@ -78,7 +74,8 @@ AM_LDFLAGS += -lcblas -latlas
 endif

 if USE_LAPACK
-AM_LDFLAGS += -L$(SparseLib) -lcholmod -lspqr
+AM_CPPFLAGS += -DGT_USE_LAPACK
+AM_LDFLAGS += -L$(DenseQRLib) -lDenseQR
 endif

 if USE_LAPACK_LINUX
diff --git a/base/SPQRUtil.h b/base/SPQRUtil.h
index 63daaba32..e7efc84ad 100644
--- a/base/SPQRUtil.h
+++ b/base/SPQRUtil.h
@@ -22,10 +22,7 @@
 #include

 #ifdef GT_USE_LAPACK
-extern "C" {
-#include
-}
-#include
+#include

 namespace gtsam {
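The header names inside the angle brackets of the SPQRUtil.h hunk above are not shown. Read alongside the link-flag change from -lcholmod -lspqr to -lDenseQR, the guarded includes presumably swap the CHOLMOD/SuiteSparseQR headers for a single DenseQR header. A minimal sketch of the resulting guard, with a hypothetical header name:

```cpp
#ifdef GT_USE_LAPACK   // now defined per-directory by base/ and linear/ Makefile.am
#include <DenseQR.h>   // placeholder name; the real header is not shown in this diff
#endif
```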
diff --git a/configure.ac b/configure.ac
index b4e46d56a..12f4a943b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -150,24 +150,45 @@ AC_ARG_WITH([boost],
   ])
 AC_SUBST([boost])

-# ask for sparse library include directory
-AC_ARG_WITH([sparse-inc],
-  [AS_HELP_STRING([--with-sparse-inc],
-    [specify the sparse library include directory (mandatory)])],
-  [SparseInc=$withval],
+# ask for ccolamd library include directory
+AC_ARG_WITH([ccolamd-inc],
+  [AS_HELP_STRING([--with-ccolamd-inc],
+    [specify the CCOLAMD library include directory (mandatory)])],
+  [CCOLAMDInc=$withval],
   [AC_MSG_FAILURE(
-    [--with-sparse-inc has to be specified])
+    [--with-ccolamd-inc has to be specified])
   ])
-AC_SUBST([SparseInc])
+AC_SUBST([CCOLAMDInc])

 # ask for sparse library lib directory
-AC_ARG_WITH([sparse-lib],
-  [AS_HELP_STRING([--with-sparse-lib],
-    [specify the sparse library lib directory (mandatory)])],
-  [SparseLib=$withval],
+AC_ARG_WITH([ccolamd-lib],
+  [AS_HELP_STRING([--with-ccolamd-lib],
+    [specify the CCOLAMD library lib directory (mandatory)])],
+  [CCOLAMDLib=$withval],
   [AC_MSG_FAILURE(
-    [--with-sparse-lib has to be specified])
+    [--with-ccolamd-lib has to be specified])
   ])
-AC_SUBST([SparseLib])
-
+AC_SUBST([CCOLAMDLib])
+
+# ask for DenseQR library include directory
+AC_ARG_WITH([denseqr-inc],
+  [AS_HELP_STRING([--with-denseqr-inc],
+    [specify the DenseQR library include directory (mandatory)])],
+  [DenseQRInc=$withval],
+  [AC_MSG_FAILURE(
+    [--with-denseqr-inc has to be specified])
+  ])
+AC_SUBST([DenseQRInc])
+
+# ask for DenseQR library lib directory
+AC_ARG_WITH([denseqr-lib],
+  [AS_HELP_STRING([--with-denseqr-lib],
+    [specify the DenseQR library lib directory (mandatory)])],
+  [DenseQRLib=$withval],
+  [AC_MSG_FAILURE(
+    [--with-denseqr-lib has to be specified])
+  ])
+AC_SUBST([DenseQRLib])
+
+
 AC_OUTPUT
diff --git a/examples/Makefile.am b/examples/Makefile.am
index 0f0570cdc..cc5492be5 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -15,12 +15,12 @@ noinst_PROGRAMS = SimpleRotation # Optimizes a single nonlinear rotation v
 noinst_PROGRAMS += PlanarSLAMExample # Solves SLAM example from tutorial by using planarSLAM
 noinst_PROGRAMS += Pose2SLAMExample # Solves SLAM example from tutorial by using planarSLAM
 noinst_PROGRAMS += PlanarSLAMSelfContained # Solves SLAM example from tutorial with all typedefs in the script
-SUBDIRS = vSLAMexample
+#SUBDIRS = vSLAMexample # does not compile....

 #----------------------------------------------------------------------------------------------------
 # rules to build local programs
 #----------------------------------------------------------------------------------------------------
 AM_LDFLAGS = $(BOOST_LDFLAGS)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
 LDADD = ../libgtsam.la
 AM_DEFAULT_SOURCE_EXT = .cpp
diff --git a/examples/Pose2SLAMExample.cpp b/examples/Pose2SLAMExample.cpp
index 776665cbf..a35bc2f0b 100644
--- a/examples/Pose2SLAMExample.cpp
+++ b/examples/Pose2SLAMExample.cpp
@@ -77,7 +77,7 @@ int main(int argc, char** argv) {
   Optimizer::shared_solver solver(new Optimizer::solver(ordering));
   Optimizer optimizer(graph, initialEstimate, solver);

-  Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT;
+  Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
   Optimizer optimizer0 = optimizer.levenbergMarquardt(1e-15, 1e-15, verbosity);

   Values result2 = *optimizer0.config();
diff --git a/examples/SimpleRotation.cpp b/examples/SimpleRotation.cpp
index 5bb63421a..811e1505c 100644
--- a/examples/SimpleRotation.cpp
+++ b/examples/SimpleRotation.cpp
@@ -70,7 +70,7 @@ int main() {
   initialEstimate.print("Initialization");

   // create an ordering
-  Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::LAMBDA);
+  Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::Parameters::LAMBDA);
   result->print("Final config");

   return 0;
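The two example updates above are the same mechanical migration applied throughout this patch: the verbosity and lambda-mode enums move from NonlinearOptimizer itself into its new Parameters typedef. A before/after sketch, assuming the Optimizer typedef these examples already have in scope:

```cpp
// Before: the enums lived directly on the optimizer class.
// Optimizer::verbosityLevel verbosity = Optimizer::SILENT;

// After: the same enumerators, scoped under Optimizer::Parameters.
Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
Optimizer optimized = optimizer.levenbergMarquardt(1e-15, 1e-15, verbosity);
```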
diff --git a/inference/Makefile.am b/inference/Makefile.am
index e8d9530c7..f5c475e0b 100644
--- a/inference/Makefile.am
+++ b/inference/Makefile.am
@@ -63,14 +63,14 @@ inferencedir = $(pkgincludedir)/inference
 inference_HEADERS = $(headers)
 noinst_LTLIBRARIES = libinference.la
 libinference_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
 AM_CXXFLAGS =

 #----------------------------------------------------------------------------------------------------
 # rules to build local programs
 #----------------------------------------------------------------------------------------------------
 TESTS = $(check_PROGRAMS)
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
 LDADD = libinference.la ../base/libbase.la
 LDADD += ../CppUnitLite/libCppUnitLite.a
 AM_DEFAULT_SOURCE_EXT = .cpp
diff --git a/linear/Makefile.am b/linear/Makefile.am
index d3a71e74b..30b5282bd 100644
--- a/linear/Makefile.am
+++ b/linear/Makefile.am
@@ -51,14 +51,14 @@ lineardir = $(pkgincludedir)/linear
 linear_HEADERS = $(headers)
 noinst_LTLIBRARIES = liblinear.la
 liblinear_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(DenseQRInc) -I$(top_srcdir)/..
 AM_CXXFLAGS =

 #----------------------------------------------------------------------------------------------------
 # rules to build local programs
 #----------------------------------------------------------------------------------------------------
 TESTS = $(check_PROGRAMS)
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
 LDADD = liblinear.la ../inference/libinference.la ../base/libbase.la
 LDADD += ../CppUnitLite/libCppUnitLite.a
 AM_DEFAULT_SOURCE_EXT = .cpp
@@ -77,5 +77,6 @@ endif

 if USE_LAPACK
 AM_CXXFLAGS += -DGT_USE_LAPACK
+AM_LDFLAGS += -L$(DenseQRLib) -lDenseQR
 endif
diff --git a/nonlinear/Makefile.am b/nonlinear/Makefile.am
index e748dcd0a..1298c40ea 100644
--- a/nonlinear/Makefile.am
+++ b/nonlinear/Makefile.am
@@ -21,7 +21,7 @@ check_PROGRAMS += tests/testLieValues tests/testKey

 # Nonlinear nonlinear
 headers += NonlinearFactorGraph.h NonlinearFactorGraph-inl.h
-headers += NonlinearOptimizer-inl.h 
+headers += NonlinearOptimizer-inl.h
 headers += NonlinearFactor.h

 sources += NonlinearOptimizer.cpp Ordering.cpp
@@ -39,14 +39,14 @@ nonlineardir = $(pkgincludedir)/nonlinear
 nonlinear_HEADERS = $(headers)
 noinst_LTLIBRARIES = libnonlinear.la
 libnonlinear_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
 AM_CXXFLAGS =

 #----------------------------------------------------------------------------------------------------
 # rules to build local programs
 #----------------------------------------------------------------------------------------------------
 TESTS = $(check_PROGRAMS)
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization)
 LDADD = libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la
 LDADD += ../CppUnitLite/libCppUnitLite.a
 AM_DEFAULT_SOURCE_EXT = .cpp
diff --git a/nonlinear/NonlinearOptimizationParameters.h b/nonlinear/NonlinearOptimizationParameters.h
new file mode 100644
index 000000000..5a3f3d332
--- /dev/null
+++ b/nonlinear/NonlinearOptimizationParameters.h
@@ -0,0 +1,51 @@
+/*
+ * NonlinearOptimizationParameters.h
+ *
+ *   Created on: Oct 14, 2010
+ *       Author: nikai
+ *  Description:
+ */
+
+#pragma once
+
+namespace gtsam {
+
+  // a container for all related parameters
+  struct NonLinearOptimizerParameters {
+    typedef enum {
+      SILENT,
+      ERROR,
+      LAMBDA,
+      TRYLAMBDA,
+      CONFIG,
+      DELTA,
+      TRYCONFIG,
+      TRYDELTA,
+      LINEAR,
+      DAMPED
+    } verbosityLevel;
+
+    typedef enum {
+      FAST,
+      BOUNDED,
+      CAUTIOUS
+    } LambdaMode;
+
+    double absDecrease_; /* threshold for the absolute decrease per iteration */
+    double relDecrease_; /* threshold for the relative decrease per iteration */
+    double sumError_; /* threshold for the sum of error */
+    int maxIterations_ ;
+    double lambdaFactor_ ;
+    verbosityLevel verbosity_;
+    LambdaMode lambdaMode_;
+
+    NonLinearOptimizerParameters(): absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
+      maxIterations_(100), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
+
+    NonLinearOptimizerParameters(double absDecrease, double relDecrease, double sumError,
+      int iIters = 100, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
+      :absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
+      maxIterations_(iIters), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
+
+  };
+}
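A minimal usage sketch for the new struct: bundle the thresholds that levenbergMarquardt() previously took as separate arguments and pass them in one object. The Optimizer/optimizer names and the include path are assumptions; the constructor arguments follow the signature above:

```cpp
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>  // assumed path

gtsam::NonLinearOptimizerParameters para(
    1e-5,   // absDecrease_: absolute per-iteration decrease threshold
    1e-5,   // relDecrease_: relative per-iteration decrease threshold
    0.0,    // sumError_: stop once the total error falls below this
    100,    // maxIterations_
    10.0,   // lambdaFactor_
    gtsam::NonLinearOptimizerParameters::SILENT,
    gtsam::NonLinearOptimizerParameters::BOUNDED);
Optimizer optimized = optimizer.levenbergMarquardt(para);  // overload added below
```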
diff --git a/nonlinear/NonlinearOptimizer-inl.h b/nonlinear/NonlinearOptimizer-inl.h
index 55a16f0dd..d6a838f98 100644
--- a/nonlinear/NonlinearOptimizer-inl.h
+++ b/nonlinear/NonlinearOptimizer-inl.h
@@ -100,24 +100,24 @@ namespace gtsam {

   /* ************************************************************************* */
   template
   NonlinearOptimizer NonlinearOptimizer::iterate(
-      verbosityLevel verbosity) const {
+      Parameters::verbosityLevel verbosity) const {

     // linearize and optimize
     VectorValues delta = linearizeAndOptimizeForDelta();

     // maybe show output
-    if (verbosity >= DELTA)
+    if (verbosity >= Parameters::DELTA)
       delta.print("delta");

     // take old config and update it
     shared_values newValues(new C(solver_->expmap(*config_, delta)));

     // maybe show output
-    if (verbosity >= CONFIG)
+    if (verbosity >= Parameters::CONFIG)
       newValues->print("newValues");

     NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, newValues, solver_, lambda_);

-    if (verbosity >= ERROR)
+    if (verbosity >= Parameters::ERROR)
       cout << "error: " << newOptimizer.error_ << endl;

     return newOptimizer;
@@ -127,12 +127,12 @@
   template
   NonlinearOptimizer NonlinearOptimizer::gaussNewton(
       double relativeThreshold, double absoluteThreshold,
-      verbosityLevel verbosity, int maxIterations) const {
+      Parameters::verbosityLevel verbosity, int maxIterations) const {
     static W writer(error_);

     // check if we're already close enough
     if (error_ < absoluteThreshold) {
-      if (verbosity >= ERROR) cout << "Exiting, as error = " << error_
+      if (verbosity >= Parameters::ERROR) cout << "Exiting, as error = " << error_
           << " < absoluteThreshold (" << absoluteThreshold << ")" << endl;
       return *this;
     }
@@ -166,19 +166,19 @@
   /* ************************************************************************* */
   template
   NonlinearOptimizer NonlinearOptimizer::try_lambda(
-      const L& linear, verbosityLevel verbosity, double factor, LambdaMode lambdaMode) const {
+      const L& linear, Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const {

-    if (verbosity >= TRYLAMBDA)
+    if (verbosity >= Parameters::TRYLAMBDA)
       cout << "trying lambda = " << lambda_ << endl;

     // add prior-factors
     L damped = linear.add_priors(1.0/sqrt(lambda_), GaussianVariableIndex<>(linear));

-    if (verbosity >= DAMPED)
+    if (verbosity >= Parameters::DAMPED)
       damped.print("damped");

     // solve
     VectorValues delta = solver_->optimize(damped);

-    if (verbosity >= TRYDELTA)
+    if (verbosity >= Parameters::TRYDELTA)
       delta.print("delta");

     // update config
@@ -188,9 +188,9 @@
     // create new optimization state with more adventurous lambda
     NonlinearOptimizer next(graph_, newValues, solver_, lambda_ / factor);

-    if (verbosity >= TRYLAMBDA) cout << "next error = " << next.error_ << endl;
+    if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << next.error_ << endl;

-    if(lambdaMode >= CAUTIOUS) {
+    if(lambdaMode >= Parameters::CAUTIOUS) {
       throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");
     }

@@ -198,7 +198,7 @@
       // If we're cautious, see if the current lambda is better
       // todo: include stopping criterion here?
-      if(lambdaMode == CAUTIOUS) {
+      if(lambdaMode == Parameters::CAUTIOUS) {
         NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_);
         if(sameLambda.error_ <= next.error_)
           return sameLambda;
@@ -211,7 +211,7 @@
     else {

       // A more adventerous lambda was worse. If we're cautious, try the same lambda.
-      if(lambdaMode == CAUTIOUS) {
+      if(lambdaMode == Parameters::CAUTIOUS) {
         NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_);
         if(sameLambda.error_ <= error_)
           return sameLambda;
@@ -222,7 +222,7 @@
       // and keep the same config.
       // TODO: can we avoid copying the config ?
-      if(lambdaMode >= BOUNDED && lambda_ >= 1.0e5) {
+      if(lambdaMode >= Parameters::BOUNDED && lambda_ >= 1.0e5) {
         return NonlinearOptimizer(graph_, newValues, solver_, lambda_);;
       } else {
         NonlinearOptimizer cautious(graph_, config_, solver_, lambda_ * factor);
@@ -237,24 +237,24 @@
   /* ************************************************************************* */
   template
   NonlinearOptimizer NonlinearOptimizer::iterateLM(
-      verbosityLevel verbosity, double lambdaFactor, LambdaMode lambdaMode) const {
+      Parameters::verbosityLevel verbosity, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {

     // maybe show output
-    if (verbosity >= CONFIG)
+    if (verbosity >= Parameters::CONFIG)
       config_->print("config");
-    if (verbosity >= ERROR)
+    if (verbosity >= Parameters::ERROR)
       cout << "error: " << error_ << endl;
-    if (verbosity >= LAMBDA)
+    if (verbosity >= Parameters::LAMBDA)
       cout << "lambda = " << lambda_ << endl;

     // linearize all factors once
     boost::shared_ptr linear = solver_->linearize(*graph_, *config_);
     NonlinearOptimizer prepared(graph_, config_, solver_->prepareLinear(*linear), error_, lambda_);

-    if (verbosity >= LINEAR)
+    if (verbosity >= Parameters::LINEAR)
       linear->print("linear");

     // try lambda steps with successively larger lambda until we achieve descent
-    if (verbosity >= LAMBDA) cout << "Trying Lambda for the first time" << endl;
+    if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl;

     return prepared.try_lambda(*linear, verbosity, lambdaFactor, lambdaMode);
   }
@@ -262,22 +262,22 @@
   template
   NonlinearOptimizer NonlinearOptimizer::levenbergMarquardt(
       double relativeThreshold, double absoluteThreshold,
-      verbosityLevel verbosity, int maxIterations, double lambdaFactor, LambdaMode lambdaMode) const {
+      Parameters::verbosityLevel verbosity, int maxIterations, double lambdaFactor, Parameters::LambdaMode lambdaMode) const {

-    return levenbergMarquardt(NonLinearOptimizerPara (absoluteThreshold, relativeThreshold, absoluteThreshold,
+    return levenbergMarquardt(NonLinearOptimizerParameters (absoluteThreshold, relativeThreshold, absoluteThreshold,
         maxIterations, lambdaFactor, verbosity, lambdaMode)) ;
   }

   template
   NonlinearOptimizer NonlinearOptimizer::
-  levenbergMarquardt(const NonLinearOptimizerPara &para) const {
+  levenbergMarquardt(const NonLinearOptimizerParameters &para) const {

     if (para.maxIterations_ <= 0) return *this;

     // check if we're already close enough
     if (error_ < para.sumError_) {
-      if (para.verbosity_ >= ERROR)
+      if (para.verbosity_ >= Parameters::ERROR)
         cout << "Exiting, as error = " << error_ << " < " << para.sumError_ << endl;
       return *this;
     }
@@ -299,15 +299,15 @@
     // return converged state or iterate
     if (converged || para.maxIterations_ <= 1) {
       // maybe show output
-      if (para.verbosity_ >= CONFIG)
+      if (para.verbosity_ >= Parameters::CONFIG)
         next.config_->print("final config");
-      if (para.verbosity_ >= ERROR)
+      if (para.verbosity_ >= Parameters::ERROR)
         cout << "final error: " << next.error_ << endl;
-      if (para.verbosity_ >= LAMBDA)
+      if (para.verbosity_ >= Parameters::LAMBDA)
         cout << "final lambda = " << next.lambda_ << endl;
       return next;
     } else {
-      NonLinearOptimizerPara newPara = para ;
+      NonLinearOptimizerParameters newPara = para ;
       newPara.maxIterations_ = newPara.maxIterations_ - 1;
       return next.levenbergMarquardt(newPara) ;
     }
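One observation on try_lambda above: add_priors(1.0/sqrt(lambda_), ...) attaches a prior with noise sigma 1/√λ to every variable, i.e. precision λ, so the damped linear system being solved is the classic Levenberg update (a hedged reading of the code, with A the linearized Jacobian and b the error vector):

```latex
\left(A^\top A + \lambda I\right)\,\delta = A^\top b
```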
diff --git a/nonlinear/NonlinearOptimizer.h b/nonlinear/NonlinearOptimizer.h
index 89a06ba6e..e5f0fb572 100644
--- a/nonlinear/NonlinearOptimizer.h
+++ b/nonlinear/NonlinearOptimizer.h
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include

 namespace gtsam {

@@ -41,17 +42,28 @@
    * until convergence. All methods are functional and return a new state.
    *
    * The class is parameterized by the Graph type $G$, Values class type $T$,
-   * linear system class $L$ and the non linear solver type $S$.
-   * the config type is in order to be able to optimize over non-vector values structures.
-   * To use in code, include in your cpp file
+   * linear system class $L$, the nonlinear solver type $S$, and the writer type $W$.
+   *
+   * The values class type $T$ makes it possible to optimize over non-vector value structures.
+   *
+   * A nonlinear system solver $S$
+   * Concept NonLinearSolver implements
+   *   linearize: G * T -> L
+   *   solve : L -> T
+   *
+   * The writer $W$ generates output to disk or the screen.
    *
    * For example, in a 2D case, $G$ can be Pose2Graph, $T$ can be Pose2Values,
    * $L$ can be GaussianFactorGraph and $S$ can be Factorization.
    * The solver class has two main functions: linearize and optimize. The first one
    * linearizes the nonlinear cost function around the current estimate, and the second
    * one optimizes the linearized system using various methods.
+   *
+   * To use the optimizer in code, include NonlinearOptimizer-inl.h in your cpp file.
+   *
+   *
    */
-  template, class Writer = NullOptimizerWriter>
+  template, class W = NullOptimizerWriter>
  class NonlinearOptimizer {

  public:

@@ -60,49 +72,7 @@
     typedef boost::shared_ptr shared_graph;
     typedef boost::shared_ptr shared_solver;
     typedef const S solver;
-
-    typedef enum {
-      SILENT,
-      ERROR,
-      LAMBDA,
-      TRYLAMBDA,
-      CONFIG,
-      DELTA,
-      TRYCONFIG,
-      TRYDELTA,
-      LINEAR,
-      DAMPED
-    } verbosityLevel;
-
-    typedef enum {
-      FAST,
-      BOUNDED,
-      CAUTIOUS
-    } LambdaMode;
-
-    // a container for all related parameters
-    struct NonLinearOptimizerPara {
-    public:
-      double absDecrease_; /* threshold for the absolute decrease per iteration */
-      double relDecrease_; /* threshold for the relative decrease per iteration */
-      double sumError_; /* threshold for the sum of error */
-      int maxIterations_ ;
-      double lambdaFactor_ ;
-      verbosityLevel verbosity_;
-      LambdaMode lambdaMode_;
-
-    public:
-
-      NonLinearOptimizerPara(): absDecrease_(1), relDecrease_(1e-3), sumError_(0.0),
-        maxIterations_(100), lambdaFactor_(10.0), verbosity_(ERROR), lambdaMode_(BOUNDED){}
-
-      NonLinearOptimizerPara(double absDecrease, double relDecrease, double sumError,
-        int iIters = 100, double lambdaFactor = 10, verbosityLevel v = ERROR, LambdaMode lambdaMode = BOUNDED)
-        :absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
-        maxIterations_(iIters), lambdaFactor_(lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode){}
-
-    };
-
+    typedef NonLinearOptimizerParameters Parameters;

  private:

@@ -124,7 +94,7 @@
     // Recursively try to do tempered Gauss-Newton steps until we succeed
     NonlinearOptimizer try_lambda(const L& linear,
-        verbosityLevel verbosity, double factor, LambdaMode lambdaMode) const;
+        Parameters::verbosityLevel verbosity, double factor, Parameters::LambdaMode lambdaMode) const;

  public:

@@ -178,7 +148,7 @@
     /**
      * Do one Gauss-Newton iteration and return next state
      */
-    NonlinearOptimizer iterate(verbosityLevel verbosity = SILENT) const;
+    NonlinearOptimizer iterate(Parameters::verbosityLevel verbosity = Parameters::SILENT) const;

     /**
      * Optimize a solution for a non linear factor graph
     */
     NonlinearOptimizer gaussNewton(double relativeThreshold, double absoluteThreshold,
-        verbosityLevel verbosity = SILENT, int maxIterations = 100) const;
+        Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100) const;

     /**
      * One iteration of Levenberg Marquardt
      */
-    NonlinearOptimizer iterateLM(verbosityLevel verbosity = SILENT,
-        double lambdaFactor = 10, LambdaMode lambdaMode = BOUNDED) const;
+    NonlinearOptimizer iterateLM(Parameters::verbosityLevel verbosity = Parameters::SILENT,
+        double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;

     /**
      * Optimize using Levenberg-Marquardt. Really Levenberg's
@@ -212,12 +182,12 @@
      */
     NonlinearOptimizer levenbergMarquardt(double relativeThreshold, double absoluteThreshold,
-        verbosityLevel verbosity = SILENT, int maxIterations = 100,
-        double lambdaFactor = 10, LambdaMode lambdaMode = BOUNDED) const;
+        Parameters::verbosityLevel verbosity = Parameters::SILENT, int maxIterations = 100,
+        double lambdaFactor = 10, Parameters::LambdaMode lambdaMode = Parameters::BOUNDED) const;

     NonlinearOptimizer
-    levenbergMarquardt(const NonLinearOptimizerPara &para) const;
+    levenbergMarquardt(const NonLinearOptimizerParameters &para) const;

     /**
      * Static interface to LM optimization using default ordering and thresholds
@@ -227,7 +197,7 @@
      * @return an optimized values structure
      */
     static shared_values optimizeLM(shared_graph graph, shared_values config,
-        verbosityLevel verbosity = SILENT) {
+        Parameters::verbosityLevel verbosity = Parameters::SILENT) {

       // Use a variable ordering from COLAMD
       Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
@@ -248,7 +218,7 @@
     /**
      * Static interface to LM optimization (no shared_ptr arguments) - see above
      */
     inline static shared_values optimizeLM(const G& graph, const T& config,
-        verbosityLevel verbosity = SILENT) {
+        Parameters::verbosityLevel verbosity = Parameters::SILENT) {
       return optimizeLM(boost::make_shared(graph), boost::make_shared(config), verbosity);
     }

@@ -261,7 +231,7 @@
      * @return an optimized values structure
      */
     static shared_values optimizeGN(shared_graph graph, shared_values config,
-        verbosityLevel verbosity = SILENT) {
+        Parameters::verbosityLevel verbosity = Parameters::SILENT) {

       Ordering::shared_ptr ordering = graph->orderingCOLAMD(*config);
       double relativeThreshold = 1e-5, absoluteThreshold = 1e-5;
@@ -279,7 +249,7 @@
     /**
      * Static interface to GN optimization (no shared_ptr arguments) - see above
      */
     inline static shared_values optimizeGN(const G& graph, const T& config,
-        verbosityLevel verbosity = SILENT) {
+        Parameters::verbosityLevel verbosity = Parameters::SILENT) {
       return optimizeGN(boost::make_shared(graph), boost::make_shared(config), verbosity);
     }
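For a concrete instantiation of the template parameters documented in the header above, the tests later in this diff use the 2D SLAM types named in the class comment. A sketch (the Factorization template arguments are an assumption):

```cpp
// G = Pose2Graph, T = Pose2Values, L = GaussianFactorGraph, S = Factorization,
// as suggested by the class comment.
typedef NonlinearOptimizer<Pose2Graph, Pose2Values, GaussianFactorGraph,
    Factorization<Pose2Graph, Pose2Values> > Optimizer;

Optimizer::shared_solver solver(new Optimizer::solver(ordering));
Optimizer optimizer0(fg, initial, solver);
Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15,
    Optimizer::Parameters::SILENT);
```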
diff --git a/slam/Makefile.am b/slam/Makefile.am
index d5b6f782b..9fb78ac17 100644
--- a/slam/Makefile.am
+++ b/slam/Makefile.am
@@ -65,14 +65,14 @@ slamdir = $(pkgincludedir)/slam
 slam_HEADERS = $(headers)
 noinst_LTLIBRARIES = libslam.la
 libslam_la_SOURCES = $(sources)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..

 #----------------------------------------------------------------------------------------------------
 # rules to build local programs
 #----------------------------------------------------------------------------------------------------
 TESTS = $(check_PROGRAMS)
 AM_DEFAULT_SOURCE_EXT = .cpp
-AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(SparseLib) -lccolamd
+AM_LDFLAGS = $(BOOST_LDFLAGS) $(boost_serialization) -L$(CCOLAMDLib) -lccolamd
 LDADD = libslam.la ../geometry/libgeometry.la ../nonlinear/libnonlinear.la ../linear/liblinear.la ../inference/libinference.la ../base/libbase.la
 LDADD += ../CppUnitLite/libCppUnitLite.a
diff --git a/slam/tests/testPose2SLAM.cpp b/slam/tests/testPose2SLAM.cpp
index c1cb3b72f..9239bab3e 100644
--- a/slam/tests/testPose2SLAM.cpp
+++ b/slam/tests/testPose2SLAM.cpp
@@ -115,7 +115,7 @@ TEST(Pose2Graph, optimize) {
   typedef NonlinearOptimizer Optimizer;
   Optimizer::shared_solver solver(new Optimizer::solver(ordering));
   Optimizer optimizer0(fg, initial, solver);
-  Optimizer::verbosityLevel verbosity = Optimizer::SILENT;
+  Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
   //Optimizer::verbosityLevel verbosity = Optimizer::ERROR;
   Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);

@@ -155,7 +155,7 @@ TEST(Pose2Graph, optimizeThreePoses) {
   // optimize
   pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering));
   pose2SLAM::Optimizer optimizer0(fg, initial, solver);
-  pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT;
+  pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
   pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);

   Pose2Values actual = *optimizer.config();
@@ -199,7 +199,7 @@ TEST(Pose2Graph, optimizeCircle) {
   // optimize
   pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering));
   pose2SLAM::Optimizer optimizer0(fg, initial, solver);
-  pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT;
+  pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT;
   pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);

   Pose2Values actual = *optimizer.config();
diff --git a/slam/tests/testPose3SLAM.cpp b/slam/tests/testPose3SLAM.cpp
index 122ecb1b7..390fbed15 100644
--- a/slam/tests/testPose3SLAM.cpp
+++ b/slam/tests/testPose3SLAM.cpp
@@ -73,7 +73,7 @@ TEST(Pose3Graph, optimizeCircle) {
   typedef NonlinearOptimizer Optimizer;
   Optimizer::shared_solver solver(new Optimizer::solver(ordering));
   Optimizer optimizer0(fg, initial, solver);
-  Optimizer::verbosityLevel verbosity = Optimizer::SILENT;
+  Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT;
   // Optimizer::verbosityLevel verbosity = Optimizer::ERROR;
   Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity);

diff --git a/tests/Makefile.am b/tests/Makefile.am
index d1228b3e9..57609f05b 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -36,7 +36,7 @@ noinst_PROGRAMS = timeGaussianFactorGraph timeLinearOnDataset

 #----------------------------------------------------------------------------------------------------
 # rules to build unit tests
 #----------------------------------------------------------------------------------------------------
 TESTS = $(check_PROGRAMS)
-AM_CPPFLAGS = -I$(boost) -I$(SparseInc) -I$(top_srcdir)/..
+AM_CPPFLAGS = -I$(boost) -I$(CCOLAMDInc) -I$(top_srcdir)/..
 AM_LDFLAGS = $(BOOST_LDFLAGS)

 if USE_ACCELERATE_MACOS
diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp
index 491d01d59..3262d8396 100644
--- a/tests/testNonlinearEquality.cpp
+++ b/tests/testNonlinearEquality.cpp
@@ -200,7 +200,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) {
   PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord));
   PoseOptimizer optimizer(graph, init, solver);
   double relThresh = 1e-5, absThresh = 1e-5;
-  PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT);
+  PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT);

   // verify
   PoseValues expected;
@@ -237,7 +237,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) {
   PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord));
   PoseOptimizer optimizer(graph, init, solver);
   double relThresh = 1e-5, absThresh = 1e-5;
-  PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT);
+  PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT);

   // verify
   PoseValues expected;
diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp
index c68b31e1e..4c1098091 100644
--- a/tests/testNonlinearOptimizer.cpp
+++ b/tests/testNonlinearOptimizer.cpp
@@ -179,7 +179,7 @@ TEST( NonlinearOptimizer, optimize )

   // Levenberg-Marquardt
   Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
-      absoluteThreshold, Optimizer::SILENT);
+      absoluteThreshold, Optimizer::Parameters::SILENT);
   DOUBLES_EQUAL(0,fg->error(*(actual2.config())),tol);
 }