diff --git a/examples/CameraResectioning.cpp b/examples/CameraResectioning.cpp index f00509c3b..7d0e37e7f 100644 --- a/examples/CameraResectioning.cpp +++ b/examples/CameraResectioning.cpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include using namespace gtsam; @@ -95,7 +95,7 @@ int main(int argc, char* argv[]) { 0.,0.,-1.), Point3(0.,0.,2.0))); /* 4. Optimize the graph using Levenberg-Marquardt*/ - Values result = optimize (graph, initial); + Values result = LevenbergMarquardtOptimizer(graph, initial).optimize(); result.print("Final result: "); return 0; diff --git a/examples/PlanarSLAMExample_easy.cpp b/examples/PlanarSLAMExample_easy.cpp index 1ff3a8677..912589080 100644 --- a/examples/PlanarSLAMExample_easy.cpp +++ b/examples/PlanarSLAMExample_easy.cpp @@ -20,7 +20,7 @@ // pull in the planar SLAM domain with all typedefs and helper functions defined #include -#include +#include using namespace std; using namespace gtsam; @@ -83,7 +83,7 @@ int main(int argc, char** argv) { initialEstimate.print("initial estimate"); // optimize using Levenberg-Marquardt optimization with an ordering from colamd - planarSLAM::Values result = optimize(graph, initialEstimate); + planarSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize(); result.print("final result"); return 0; diff --git a/examples/PlanarSLAMSelfContained_advanced.cpp b/examples/PlanarSLAMSelfContained_advanced.cpp index 10802ee87..bd5c5295a 100644 --- a/examples/PlanarSLAMSelfContained_advanced.cpp +++ b/examples/PlanarSLAMSelfContained_advanced.cpp @@ -35,17 +35,12 @@ // implementations for structures - needed if self-contained, and these should be included last #include -#include -#include -#include +#include +#include using namespace std; using namespace gtsam; -// Main typedefs -typedef NonlinearOptimizer OptimizerSeqential; // optimization engine for this domain -typedef NonlinearOptimizer OptimizerMultifrontal; // optimization engine for this domain - /** * In this version of the system we make the following assumptions: * - All values are axis aligned @@ -61,14 +56,14 @@ int main(int argc, char** argv) { Symbol l1('l',1), l2('l',2); // create graph container and add factors to it - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); + NonlinearFactorGraph graph; /* add prior */ // gaussian for prior SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin PriorFactor posePrior(x1, prior_measurement, prior_model); // create the factor - graph->add(posePrior); // add the factor to the graph + graph.add(posePrior); // add the factor to the graph /* add odometry */ // general noisemodel for odometry @@ -77,8 +72,8 @@ int main(int argc, char** argv) { // create between factors to represent odometry BetweenFactor odom12(x1, x2, odom_measurement, odom_model); BetweenFactor odom23(x2, x3, odom_measurement, odom_model); - graph->add(odom12); // add both to graph - graph->add(odom23); + graph.add(odom12); // add both to graph + graph.add(odom23); /* add measurements */ // general noisemodel for measurements @@ -98,41 +93,44 @@ int main(int argc, char** argv) { BearingRangeFactor meas32(x3, l2, bearing32, range32, meas_model); // add the factors - graph->add(meas11); - graph->add(meas21); - graph->add(meas32); + graph.add(meas11); + graph.add(meas21); + graph.add(meas32); - graph->print("Full Graph"); + graph.print("Full Graph"); // initialize to noisy points - boost::shared_ptr initial(new 
Values); - initial->insert(x1, Pose2(0.5, 0.0, 0.2)); - initial->insert(x2, Pose2(2.3, 0.1,-0.2)); - initial->insert(x3, Pose2(4.1, 0.1, 0.1)); - initial->insert(l1, Point2(1.8, 2.1)); - initial->insert(l2, Point2(4.1, 1.8)); + Values initial; + initial.insert(x1, Pose2(0.5, 0.0, 0.2)); + initial.insert(x2, Pose2(2.3, 0.1,-0.2)); + initial.insert(x3, Pose2(4.1, 0.1, 0.1)); + initial.insert(l1, Point2(1.8, 2.1)); + initial.insert(l2, Point2(4.1, 1.8)); - initial->print("initial estimate"); + initial.print("initial estimate"); // optimize using Levenberg-Marquardt optimization with an ordering from colamd // first using sequential elimination - OptimizerSeqential::shared_values resultSequential = OptimizerSeqential::optimizeLM(*graph, *initial); - resultSequential->print("final result (solved with a sequential solver)"); + LevenbergMarquardtParams lmParams; + lmParams.elimination = LevenbergMarquardtParams::SEQUENTIAL; + Values resultSequential = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimize(); + resultSequential.print("final result (solved with a sequential solver)"); // then using multifrontal, advanced interface - // Note how we create an optimizer, call LM, then we get access to covariances - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial); - OptimizerMultifrontal optimizerMF(graph, initial, ordering); - OptimizerMultifrontal resultMF = optimizerMF.levenbergMarquardt(); - resultMF.values()->print("final result (solved with a multifrontal solver)"); + // Note that we keep the original optimizer object so we can use the COLAMD + // ordering it computes. + LevenbergMarquardtOptimizer optimizer(graph, initial); + Values resultMultifrontal = optimizer.optimize(); + resultMultifrontal.print("final result (solved with a multifrontal solver)"); // Print marginals covariances for all variables - print(resultMF.marginalCovariance(x1), "x1 covariance"); - print(resultMF.marginalCovariance(x2), "x2 covariance"); - print(resultMF.marginalCovariance(x3), "x3 covariance"); - print(resultMF.marginalCovariance(l1), "l1 covariance"); - print(resultMF.marginalCovariance(l2), "l2 covariance"); + Marginals marginals(graph, resultMultifrontal, Marginals::CHOLESKY); + print(marginals.marginalCovariance(x1), "x1 covariance"); + print(marginals.marginalCovariance(x2), "x2 covariance"); + print(marginals.marginalCovariance(x3), "x3 covariance"); + print(marginals.marginalCovariance(l1), "l1 covariance"); + print(marginals.marginalCovariance(l2), "l2 covariance"); return 0; } diff --git a/examples/Pose2SLAMExample_advanced.cpp b/examples/Pose2SLAMExample_advanced.cpp index 432b6a050..29d8c58cf 100644 --- a/examples/Pose2SLAMExample_advanced.cpp +++ b/examples/Pose2SLAMExample_advanced.cpp @@ -22,7 +22,8 @@ // pull in the Pose2 SLAM domain with all typedefs and helper functions defined #include -#include +#include +#include #include #include @@ -34,13 +35,13 @@ using namespace pose2SLAM; int main(int argc, char** argv) { /* 1. 
create graph container and add factors to it */ - shared_ptr graph(new Graph); + Graph graph; /* 2.a add prior */ // gaussian for prior SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin - graph->addPrior(1, prior_measurement, prior_model); // add directly to graph + graph.addPrior(1, prior_measurement, prior_model); // add directly to graph /* 2.b add odometry */ // general noisemodel for odometry @@ -48,33 +49,36 @@ int main(int argc, char** argv) { /* Pose2 measurements take (x,y,theta), where theta is taken from the positive x-axis*/ Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) - graph->addOdometry(1, 2, odom_measurement, odom_model); - graph->addOdometry(2, 3, odom_measurement, odom_model); - graph->print("full graph"); + graph.addOdometry(1, 2, odom_measurement, odom_model); + graph.addOdometry(2, 3, odom_measurement, odom_model); + graph.print("full graph"); /* 3. Create the data structure to hold the initial estimate to the solution * initialize to noisy points */ - shared_ptr initial(new pose2SLAM::Values); - initial->insertPose(1, Pose2(0.5, 0.0, 0.2)); - initial->insertPose(2, Pose2(2.3, 0.1,-0.2)); - initial->insertPose(3, Pose2(4.1, 0.1, 0.1)); - initial->print("initial estimate"); + pose2SLAM::Values initial; + initial.insertPose(1, Pose2(0.5, 0.0, 0.2)); + initial.insertPose(2, Pose2(2.3, 0.1,-0.2)); + initial.insertPose(3, Pose2(4.1, 0.1, 0.1)); + initial.print("initial estimate"); /* 4.2.1 Alternatively, you can go through the process step by step * Choose an ordering */ - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial); + Ordering ordering = *graph.orderingCOLAMD(initial); /* 4.2.2 set up solver and optimize */ - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - Optimizer optimizer(graph, initial, ordering, params); - Optimizer optimizer_result = optimizer.levenbergMarquardt(); + LevenbergMarquardtParams params; + params.absoluteErrorTol = 1e-15; + params.relativeErrorTol = 1e-15; + params.ordering = ordering; + LevenbergMarquardtOptimizer optimizer(graph, initial, params); - pose2SLAM::Values result = *optimizer_result.values(); + pose2SLAM::Values result = optimizer.optimize(); result.print("final result"); /* Get covariances */ - Matrix covariance1 = optimizer_result.marginalCovariance(PoseKey(1)); - Matrix covariance2 = optimizer_result.marginalCovariance(PoseKey(2)); + Marginals marginals(graph, result, Marginals::CHOLESKY); + Matrix covariance1 = marginals.marginalCovariance(PoseKey(1)); + Matrix covariance2 = marginals.marginalCovariance(PoseKey(2)); print(covariance1, "Covariance1"); print(covariance2, "Covariance2"); diff --git a/examples/Pose2SLAMExample_easy.cpp b/examples/Pose2SLAMExample_easy.cpp index 2593c043c..18a756aba 100644 --- a/examples/Pose2SLAMExample_easy.cpp +++ b/examples/Pose2SLAMExample_easy.cpp @@ -24,7 +24,7 @@ // pull in the Pose2 SLAM domain with all typedefs and helper functions defined #include -#include +#include using namespace std; using namespace gtsam; @@ -61,7 +61,7 @@ int main(int argc, char** argv) { /* 4 Single Step Optimization * optimize using Levenberg-Marquardt optimization with an ordering from colamd */ - pose2SLAM::Values result = optimize(graph, initial); + pose2SLAM::Values result = graph.optimize(initial); result.print("final result"); diff --git a/examples/SimpleRotation.cpp 
b/examples/SimpleRotation.cpp
index 71727f750..603d345bf 100644
--- a/examples/SimpleRotation.cpp
+++ b/examples/SimpleRotation.cpp
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-#include <gtsam/nonlinear/NonlinearOptimization.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 
 /*
  * TODO: make factors independent of RotValues
@@ -105,7 +105,7 @@ int main() {
    * initial estimate. This will yield a new RotValues structure
    * with the final state of the optimization.
    */
-  Values result = optimize(graph, initial);
+  Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
   result.print("final result");
 
   return 0;
diff --git a/examples/vSLAMexample/vSFMexample.cpp b/examples/vSLAMexample/vSFMexample.cpp
index c7e0df05c..8774b79f3 100644
--- a/examples/vSLAMexample/vSFMexample.cpp
+++ b/examples/vSLAMexample/vSFMexample.cpp
@@ -20,7 +20,7 @@ using namespace boost;
 #include
-#include
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 #include
 #include
@@ -129,26 +129,27 @@ int main(int argc, char* argv[]) {
   readAllData();
 
   // Create a graph using the 2D measurements (features) and the calibration data
-  boost::shared_ptr<Graph> graph(new Graph(setupGraph(g_measurements, measurementSigma, g_calib)));
+  Graph graph(setupGraph(g_measurements, measurementSigma, g_calib));
 
   // Create an initial Values structure using groundtruth values as the initial estimates
-  boost::shared_ptr<Values> initialEstimates(new Values(initialize(g_landmarks, g_poses)));
+  Values initialEstimates(initialize(g_landmarks, g_poses));
 
   cout << "*******************************************************" << endl;
-  initialEstimates->print("INITIAL ESTIMATES: ");
+  initialEstimates.print("INITIAL ESTIMATES: ");
 
   // Add prior factor for all poses in the graph
   map::iterator poseit = g_poses.begin();
   for (; poseit != g_poses.end(); poseit++)
-    graph->addPosePrior(poseit->first, poseit->second, noiseModel::Unit::Create(1));
+    graph.addPosePrior(poseit->first, poseit->second, noiseModel::Unit::Create(1));
 
   // Optimize the graph
   cout << "*******************************************************" << endl;
-  NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newVerbosity(Optimizer::Parameters::DAMPED);
-  visualSLAM::Optimizer::shared_values result = visualSLAM::Optimizer::optimizeGN(graph, initialEstimates, params);
+  LevenbergMarquardtParams params;
+  params.verbosityLM = LevenbergMarquardtParams::DAMPED;
+  visualSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimates, params).optimize();
 
   // Print final results
   cout << "*******************************************************" << endl;
-  result->print("FINAL RESULTS: ");
+  result.print("FINAL RESULTS: ");
 
   return 0;
 }
diff --git a/gtsam/geometry/CalibratedCamera.h b/gtsam/geometry/CalibratedCamera.h
index 95ff66e68..04296eba1 100644
--- a/gtsam/geometry/CalibratedCamera.h
+++ b/gtsam/geometry/CalibratedCamera.h
@@ -120,7 +120,7 @@ namespace gtsam {
   /* ************************************************************************* */
 
   /// @}
-  /// @name Transformations
+  /// @name Transformations and measurement functions
   /// @{
 
   /**
@@ -147,6 +147,36 @@ namespace gtsam {
   */
   static Point3 backproject_from_camera(const Point2& p, const double scale);
 
+  /**
+   * Calculate range to a landmark
+   * @param point 3D location of landmark
+   * @return range (double)
+   */
+  double range(const Point3& point,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(point, H1, H2); }
+
+  /**
+   * Calculate range to another pose
+   * @param pose Other SO(3) pose
+   * @return range (double)
+   */
+  double range(const Pose3& pose,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(pose, H1, H2); }
+
+  /**
+   * Calculate range to another camera
+   * @param camera Other camera
+   * @return range (double)
+   */
+  double range(const CalibratedCamera& camera,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(camera.pose_, H1, H2); }
+
 private:
 
   /// @}
diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h
index 8aee12064..943409b4b 100644
--- a/gtsam/geometry/PinholeCamera.h
+++ b/gtsam/geometry/PinholeCamera.h
@@ -161,9 +161,9 @@ namespace gtsam {
     return PinholeCamera(pose3, K);
   }
 
-  /* ************************************************************************* */
-  // measurement functions and derivatives
-  /* ************************************************************************* */
+  /// @}
+  /// @name Transformations and measurement functions
+  /// @{
 
   /**
   * projects a 3-dimensional point in camera coordinates into the
@@ -186,10 +186,6 @@ namespace gtsam {
     return std::make_pair(k_.uncalibrate(pn), pc.z()>0);
   }
 
-  /// @}
-  /// @name Transformations
-  /// @{
-
   /** project a point from world coordinate to the image
   * @param pw is a point in the world coordinate
   * @param H1 is the jacobian w.r.t. pose3
@@ -270,6 +266,77 @@ namespace gtsam {
     return backproject(pi, scale);
   }
 
+  /**
+   * Calculate range to a landmark
+   * @param point 3D location of landmark
+   * @return range (double)
+   */
+  double range(const Point3& point,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    double result = pose_.range(point, H1, H2);
+    if(H1) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H1r(*H1);
+      H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim());
+      H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim());
+    }
+    return result;
+  }
+
+  /**
+   * Calculate range to another pose
+   * @param pose Other SO(3) pose
+   * @return range (double)
+   */
+  double range(const Pose3& pose,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    double result = pose_.range(pose, H1, H2);
+    if(H1) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H1r(*H1);
+      H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim());
+      H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim());
+    }
+    return result;
+  }
+
+  /**
+   * Calculate range to another camera
+   * @param camera Other camera
+   * @return range (double)
+   */
+  template<class CalibrationB>
+  double range(const PinholeCamera<CalibrationB>& camera,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    double result = pose_.range(camera.pose_, H1, H2);
+    if(H1) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H1r(*H1);
+      H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim());
+      H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim());
+    }
+    if(H2) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H2r(*H2);
+      H2r.conservativeResize(Eigen::NoChange, camera.pose().dim() + camera.calibration().dim());
+      H2r.block(0, camera.pose().dim(), 1, camera.calibration().dim()) = Matrix::Zero(1, camera.calibration().dim());
+    }
+    return result;
+  }
+
+  /**
+   * Calculate range to another camera
+   * @param camera Other camera
+   * @return range (double)
+   */
+  double range(const CalibratedCamera& camera,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(camera.pose_, H1, H2); }
+
 private:
 
   /// @}
diff --git a/gtsam/geometry/Pose3.h
b/gtsam/geometry/Pose3.h index 29c4a90ca..9cfe7ba37 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -217,10 +217,10 @@ namespace gtsam { /** * Calculate range to another pose - * @param point SO(3) pose of landmark + * @param pose Other SO(3) pose * @return range (double) */ - double range(const Pose3& point, + double range(const Pose3& pose, boost::optional H1=boost::none, boost::optional H2=boost::none) const; diff --git a/gtsam/inference/BayesNet.h b/gtsam/inference/BayesNet.h index a05440d69..0f78a15e3 100644 --- a/gtsam/inference/BayesNet.h +++ b/gtsam/inference/BayesNet.h @@ -81,7 +81,7 @@ public: } /** BayesNet with 1 conditional */ - BayesNet(const sharedConditional& conditional) { push_back(conditional); } + explicit BayesNet(const sharedConditional& conditional) { push_back(conditional); } /// @} /// @name Testable diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h index aba7181ff..a9ed53ee9 100644 --- a/gtsam/inference/BayesTree.h +++ b/gtsam/inference/BayesTree.h @@ -145,7 +145,7 @@ namespace gtsam { BayesTree() {} /** Create a Bayes Tree from a Bayes Net (requires CONDITIONAL is IndexConditional *or* CONDITIONAL::Combine) */ - BayesTree(const BayesNet& bayesNet); + explicit BayesTree(const BayesNet& bayesNet); /** Copy constructor */ BayesTree(const This& other); diff --git a/gtsam/linear/GaussianMultifrontalSolver.cpp b/gtsam/linear/GaussianMultifrontalSolver.cpp index dd0589965..d7e0d1f91 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.cpp +++ b/gtsam/linear/GaussianMultifrontalSolver.cpp @@ -42,7 +42,7 @@ void GaussianMultifrontalSolver::replaceFactors(const FactorGraph::shared_ptr GaussianMultifrontalSolver::eliminate() const { +GaussianBayesTree::shared_ptr GaussianMultifrontalSolver::eliminate() const { if (useQR_) return Base::eliminate(&EliminateQR); else diff --git a/gtsam/linear/GaussianMultifrontalSolver.h b/gtsam/linear/GaussianMultifrontalSolver.h index 71a4c0c20..8177bed0b 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.h +++ b/gtsam/linear/GaussianMultifrontalSolver.h @@ -18,6 +18,7 @@ #pragma once #include +#include #include #include @@ -86,7 +87,7 @@ public: * Eliminate the factor graph sequentially. Uses a column elimination tree * to recursively eliminate. */ - BayesTree::shared_ptr eliminate() const; + GaussianBayesTree::shared_ptr eliminate() const; /** * Compute the least-squares solution of the GaussianFactorGraph. 
This diff --git a/gtsam/linear/tests/timeSLAMlike.cpp b/gtsam/linear/tests/timeSLAMlike.cpp index 24470f126..a02858def 100644 --- a/gtsam/linear/tests/timeSLAMlike.cpp +++ b/gtsam/linear/tests/timeSLAMlike.cpp @@ -41,7 +41,7 @@ int main(int argc, char *argv[]) { size_t vardim = 3; size_t blockdim = 3; - size_t nVars = 500; + int nVars = 500; size_t blocksPerVar = 5; size_t varsPerBlock = 2; size_t varSpread = 10; @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) { for(size_t trial=0; trial > terms; terms.reserve(varsPerBlock); if(c == 0 && d == 0) diff --git a/gtsam/nonlinear/DoglegOptimizer-inl.h b/gtsam/nonlinear/DoglegOptimizer-inl.h deleted file mode 100644 index 432844101..000000000 --- a/gtsam/nonlinear/DoglegOptimizer-inl.h +++ /dev/null @@ -1,7 +0,0 @@ -/** - * @file DoglegOptimizer-inl.h - * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm - * @author Richard Roberts - */ - -#pragma once diff --git a/gtsam/nonlinear/DoglegOptimizer.cpp b/gtsam/nonlinear/DoglegOptimizer.cpp new file mode 100644 index 000000000..84090d666 --- /dev/null +++ b/gtsam/nonlinear/DoglegOptimizer.cpp @@ -0,0 +1,68 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DoglegOptimizer.cpp + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +/* ************************************************************************* */ +void DoglegOptimizer::iterate(void) { + + // Linearize graph + const Ordering& ordering = *params_.ordering; + GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering); + + // Get elimination method + GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction(); + + // Pull out parameters we'll use + const bool dlVerbose = (params_.verbosityDL > DoglegParams::SILENT); + + // Do Dogleg iteration with either Multifrontal or Sequential elimination + DoglegOptimizerImpl::IterationResult result; + + if(params_.elimination == DoglegParams::MULTIFRONTAL) { + GaussianBayesTree bt; + bt.insert(GaussianJunctionTree(*linear).eliminate(eliminationMethod)); + result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, bt, graph_, state_.values, ordering, state_.error, dlVerbose); + + } else if(params_.elimination == DoglegParams::SEQUENTIAL) { + GaussianBayesNet::shared_ptr bn = EliminationTree::Create(*linear)->eliminate(eliminationMethod); + result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose); + + } else { + throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination"); + } + + // Maybe show output + if(params_.verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta"); + + // Create new state with new values and new error + state_.values = state_.values.retract(result.dx_d, ordering); + state_.error = result.f_error; + state_.Delta = result.Delta; + ++state_.iterations; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/DoglegOptimizer.h b/gtsam/nonlinear/DoglegOptimizer.h 
index 62ff72b05..7edb43ca9 100644
--- a/gtsam/nonlinear/DoglegOptimizer.h
+++ b/gtsam/nonlinear/DoglegOptimizer.h
@@ -1,67 +1,154 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
 /**
  * @file DoglegOptimizer.h
  * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm
  * @author Richard Roberts
+ * @created Feb 26, 2012
  */
 
 #pragma once
 
-#include
+#include
 
 namespace gtsam {
 
-/**
- * A class to perform nonlinear optimization using Powell's dogleg algorithm.
- * This class is functional, meaning every method is \c const, and returns a new
- * copy of the class.
- *
- * \tparam VALUES The Values or TupleValues type to hold the values to be
- * estimated.
- *
- * \tparam GAUSSIAN_SOLVER The linear solver to use at each iteration,
- * currently either GaussianSequentialSolver or GaussianMultifrontalSolver.
- * The latter is typically faster, especially for non-trivial problems.
+class DoglegOptimizer;
+
+/** Parameters for Dogleg optimization.  Note that this parameter class
+ * inherits from SuccessiveLinearizationParams, which in turn inherits from
+ * NonlinearOptimizerParams, which specifies the parameters common to all
+ * nonlinear optimization algorithms.  This class also contains all of those
+ * parameters.
  */
-template<class VALUES, class GAUSSIAN_SOLVER>
-class DoglegOptimizer {
+class DoglegParams : public SuccessiveLinearizationParams {
+public:
+  /** See DoglegParams::verbosityDL */
+  enum VerbosityDL {
+    SILENT,
+    VERBOSE
+  };
+
+  double deltaInitial; ///< The initial trust region radius (default: 1.0)
+  VerbosityDL verbosityDL; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity
+
+  DoglegParams() :
+    deltaInitial(1.0), verbosityDL(SILENT) {}
+
+  virtual ~DoglegParams() {}
+
+  virtual void print(const std::string& str = "") const {
+    SuccessiveLinearizationParams::print(str);
+    std::cout << "          deltaInitial: " << deltaInitial << "\n";
+    std::cout.flush();
+  }
+};
+
+/**
+ * State for DoglegOptimizer
+ */
+class DoglegState : public NonlinearOptimizerState {
+public:
+
+  double Delta;
+
+  DoglegState() {}
+
+  virtual ~DoglegState() {}
 
 protected:
+  DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations), Delta(params.deltaInitial) {}
 
-  typedef DoglegOptimizer<VALUES, GAUSSIAN_SOLVER> This; ///< Typedef to this class
+  friend class DoglegOptimizer;
+};
 
-  const sharedGraph graph_;
-  const sharedValues values_;
-  const double error_;
+/**
+ * This class performs Dogleg nonlinear optimization
+ */
+class DoglegOptimizer : public NonlinearOptimizer {
 
 public:
 
-  typedef VALUES ValuesType; ///< Typedef of the VALUES template parameter
-  typedef GAUSSIAN_SOLVER SolverType; ///< Typedef of the GAUSSIAN_SOLVER template parameter
-  typedef NonlinearFactorGraph<ValuesType> GraphType; ///< A nonlinear factor graph templated on ValuesType
+  typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
 
-  typedef boost::shared_ptr<GraphType> sharedGraph; ///< A shared_ptr to GraphType
-  typedef boost::shared_ptr<ValuesType> sharedValues; ///< A shared_ptr to ValuesType
+  /// @name Standard interface
+  /// @{
 
-
-  /**
-   * Construct a DoglegOptimizer from the factor graph to optimize and the
-   * initial estimate of the variable values, using the default variable
-   * ordering method, currently COLAMD.
-   * @param graph The factor graph to optimize
-   * @param initialization An initial estimate of the variable values
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
   */
-  DoglegOptimizer(sharedGraph graph, sharedValues initialization);
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const DoglegParams& params = DoglegParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues, params_) {}
 
-  /**
-   * Construct a DoglegOptimizer from the factor graph to optimize and the
-   * initial estimate of the variable values, using the default variable
-   * ordering method, currently COLAMD. (this non-shared-pointer version
-   * incurs a performance hit for copying, see DoglegOptimizer(sharedGraph, sharedValues)).
-   * @param graph The factor graph to optimize
-   * @param initialization An initial estimate of the variable values
+  /** Alternative constructor that takes a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering to use instead of a
+   * computed COLAMD ordering.  For convenience this version takes plain
+   * objects instead of shared pointers, but internally copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param ordering The variable elimination ordering to use
   */
-  DoglegOptimizer(const GraphType& graph, const ValuesType& initialization);
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph) {
+    params_.ordering = ordering;
+    state_ = DoglegState(graph, initialValues, params_); }
 
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~DoglegOptimizer() {}
+
+  /** Perform a single iteration of optimization.  This updates the variable
+   * assignments and error stored in the optimizer state; the updated
+   * assignments may be retrieved with values().
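+   *
+   * A minimal sketch of manual iteration (hypothetical graph/initial
+   * objects; most users should simply call optimize() instead):
+   * \code
+   *   DoglegOptimizer optimizer(graph, initial);
+   *   while(optimizer.state().iterations < 10)
+   *     optimizer.iterate();  // one Dogleg step; current trust region radius is Delta()
+   *   optimizer.values().print("estimate after 10 iterations");
+   * \endcode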
+ */ + virtual void iterate(); + + /** Access the parameters */ + const DoglegParams& params() const { return params_; } + + /** Access the last state */ + const DoglegState& state() const { return state_; } + + /** Access the current trust region radius Delta */ + double Delta() const { return state_.Delta; } + + /// @} + +protected: + DoglegParams params_; + DoglegState state_; + + /** Access the parameters (base class version) */ + virtual const NonlinearOptimizerParams& _params() const { return params_; } + + /** Access the state (base class version) */ + virtual const NonlinearOptimizerState& _state() const { return state_; } + + /** Internal function for computing a COLAMD ordering if no ordering is specified */ + DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const { + if(!params.ordering) + params.ordering = *graph.orderingCOLAMD(values); + return params; + } }; } diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.cpp b/gtsam/nonlinear/GaussNewtonOptimizer.cpp new file mode 100644 index 000000000..8e2b20859 --- /dev/null +++ b/gtsam/nonlinear/GaussNewtonOptimizer.cpp @@ -0,0 +1,56 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussNewtonOptimizer.cpp + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +/* ************************************************************************* */ +void GaussNewtonOptimizer::iterate() { + + const NonlinearOptimizerState& current = state_; + + // Linearize graph + GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering); + + // Optimize + VectorValues delta; + { + GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction(); + if(params_.elimination == GaussNewtonParams::MULTIFRONTAL) + delta = GaussianJunctionTree(*linear).optimize(eliminationMethod); + else if(params_.elimination == GaussNewtonParams::SEQUENTIAL) + delta = gtsam::optimize(*EliminationTree::Create(*linear)->eliminate(eliminationMethod)); + else + throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination"); + } + + // Maybe show output + if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta.print("delta"); + + // Create new state with new values and new error + state_.values = current.values.retract(delta, *params_.ordering); + state_.error = graph_.error(state_.values); + ++ state_.iterations; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h new file mode 100644 index 000000000..39b64b667 --- /dev/null +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -0,0 +1,117 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussNewtonOptimizer.h
+ * @brief Gauss-Newton nonlinear optimizer
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#pragma once
+
+#include
+
+namespace gtsam {
+
+class GaussNewtonOptimizer;
+
+/** Parameters for Gauss-Newton optimization, inherits from
+ * SuccessiveLinearizationParams, which in turn inherits from
+ * NonlinearOptimizerParams.
+ */
+class GaussNewtonParams : public SuccessiveLinearizationParams {
+};
+
+class GaussNewtonState : public NonlinearOptimizerState {
+protected:
+  GaussNewtonState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations) {}
+
+  friend class GaussNewtonOptimizer;
+};
+
+/**
+ * This class performs Gauss-Newton nonlinear optimization
+ */
+class GaussNewtonOptimizer : public NonlinearOptimizer {
+
+public:
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
+   */
+  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const GaussNewtonParams& params = GaussNewtonParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues) {}
+
+  /** Alternative constructor that takes a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering to use instead of a
+   * computed COLAMD ordering.  For convenience this version takes plain
+   * objects instead of shared pointers, but internally copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param ordering The variable elimination ordering to use
+   */
+  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph), state_(graph, initialValues) {
+    params_.ordering = ordering; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~GaussNewtonOptimizer() {}
+
+  /** Perform a single iteration of optimization.  This updates the variable
+   * assignments and error stored in the optimizer state; the updated
+   * assignments may be retrieved with values().
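+   *
+   * A minimal usage sketch (hypothetical graph/initial objects; optimize()
+   * repeatedly calls iterate() until the convergence criteria in the
+   * parameters are met):
+   * \code
+   *   GaussNewtonOptimizer optimizer(graph, initial);
+   *   Values result = optimizer.optimize();
+   * \endcode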
+   */
+  virtual void iterate();
+
+  /** Access the parameters */
+  const GaussNewtonParams& params() const { return params_; }
+
+  /** Access the last state */
+  const GaussNewtonState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  GaussNewtonParams params_;
+  GaussNewtonState state_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = *graph.orderingCOLAMD(values);
+    return params;
+  }
+
+};
+
+}
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
new file mode 100644
index 000000000..1b19cfe75
--- /dev/null
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -0,0 +1,127 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file LevenbergMarquardtOptimizer.cpp
+ * @brief
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#include
+
+#include  // For NegativeMatrixException
+#include
+#include
+
+using namespace std;
+
+namespace gtsam {
+
+/* ************************************************************************* */
+void LevenbergMarquardtOptimizer::iterate() {
+
+  // Linearize graph
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering);
+
+  // Get elimination method
+  GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction();
+
+  // Pull out parameters we'll use
+  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
+  const LevenbergMarquardtParams::VerbosityLM lmVerbosity = params_.verbosityLM;
+
+  // Keep increasing lambda until we make progress
+  while(true) {
+    if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
+      cout << "trying lambda = " << state_.lambda << endl;
+
+    // Add prior factors to damp the system
+    // TODO: replace this damping with a backsubstitution approach
+    GaussianFactorGraph dampedSystem(*linear);
+    {
+      double sigma = 1.0 / sqrt(state_.lambda);
+      dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
+      // for each of the variables, add a prior
+      for(Index j=0; j<dimensions_.size(); ++j) {
+        size_t dim = dimensions_[j];
+        Matrix A = Matrix::Identity(dim, dim);
+        Vector b = Vector::Zero(dim);
+        SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
+        GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
+        dampedSystem.push_back(prior);
+      }
+    }
+    if (lmVerbosity >= LevenbergMarquardtParams::DAMPED) dampedSystem.print("damped");
+
+    // Try solving
+    try {
+
+      // Optimize
+      VectorValues delta;
+      if(params_.elimination == SuccessiveLinearizationParams::MULTIFRONTAL)
+        delta = GaussianJunctionTree(dampedSystem).optimize(eliminationMethod);
+      else if(params_.elimination == SuccessiveLinearizationParams::SEQUENTIAL)
+        delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(dampedSystem)->eliminate(eliminationMethod));
+      else
+        throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
+
+      if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl;
+      if (lmVerbosity >=
LevenbergMarquardtParams::TRYDELTA) delta.print("delta"); + + // update values + Values newValues = state_.values.retract(delta, *params_.ordering); + + // create new optimization state with more adventurous lambda + double error = graph_.error(newValues); + + if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl; + + if(error <= state_.error) { + state_.values.swap(newValues); + state_.error = error; + state_.lambda /= params_.lambdaFactor; + break; + } else { + // Either we're not cautious, or the same lambda was worse than the current error. + // The more adventurous lambda was worse too, so make lambda more conservative + // and keep the same values. + if(state_.lambda >= params_.lambdaUpperBound) { + if(nloVerbosity >= NonlinearOptimizerParams::ERROR) + cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; + break; + } else { + state_.lambda *= params_.lambdaFactor; + } + } + } catch(const NegativeMatrixException& e) { + if(lmVerbosity >= LevenbergMarquardtParams::LAMBDA) + cout << "Negative matrix, increasing lambda" << endl; + // Either we're not cautious, or the same lambda was worse than the current error. + // The more adventurous lambda was worse too, so make lambda more conservative + // and keep the same values. + if(state_.lambda >= params_.lambdaUpperBound) { + if(nloVerbosity >= NonlinearOptimizerParams::ERROR) + cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; + break; + } else { + state_.lambda *= params_.lambdaFactor; + } + } catch(...) { + throw; + } + } // end while + + // Increment the iteration counter + ++state_.iterations; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.h b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h new file mode 100644 index 000000000..c1fe55c26 --- /dev/null +++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.h @@ -0,0 +1,165 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file LevenbergMarquardtOptimizer.h + * @brief + * @author Richard Roberts + * @created Feb 26, 2012 + */ + +#pragma once + +#include + +namespace gtsam { + +class LevenbergMarquardtOptimizer; + +/** Parameters for Levenberg-Marquardt optimization. Note that this parameters + * class inherits from NonlinearOptimizerParams, which specifies the parameters + * common to all nonlinear optimization algorithms. This class also contains + * all of those parameters. 
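+ *
+ * A usage sketch (hypothetical graph/initial objects; only parameters
+ * declared in this class and its base classes are set):
+ * \code
+ *   LevenbergMarquardtParams params;
+ *   params.lambdaInitial = 1e-3;    // start with more damping than the 1e-5 default
+ *   params.lambdaUpperBound = 1e7;  // give up later than the 1e5 default
+ *   params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA;
+ *   Values result = LevenbergMarquardtOptimizer(graph, initial, params).optimize();
+ * \endcode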
+ */
+class LevenbergMarquardtParams : public SuccessiveLinearizationParams {
+public:
+  /** See LevenbergMarquardtParams::verbosityLM */
+  enum VerbosityLM {
+    SILENT,
+    LAMBDA,
+    TRYLAMBDA,
+    TRYCONFIG,
+    TRYDELTA,
+    DAMPED
+  };
+
+  double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5)
+  double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0)
+  double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5)
+  VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity
+
+  LevenbergMarquardtParams() :
+    lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM(SILENT) {}
+
+  virtual ~LevenbergMarquardtParams() {}
+
+  virtual void print(const std::string& str = "") const {
+    SuccessiveLinearizationParams::print(str);
+    std::cout << "         lambdaInitial: " << lambdaInitial << "\n";
+    std::cout << "          lambdaFactor: " << lambdaFactor << "\n";
+    std::cout << "      lambdaUpperBound: " << lambdaUpperBound << "\n";
+    std::cout.flush();
+  }
+};
+
+/**
+ * State for LevenbergMarquardtOptimizer
+ */
+class LevenbergMarquardtState : public NonlinearOptimizerState {
+public:
+
+  double lambda;
+
+  LevenbergMarquardtState() {}
+
+  virtual ~LevenbergMarquardtState() {}
+
+protected:
+  LevenbergMarquardtState(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, initialValues, iterations), lambda(params.lambdaInitial) {}
+
+  friend class LevenbergMarquardtOptimizer;
+};
+
+/**
+ * This class performs Levenberg-Marquardt nonlinear optimization
+ */
+class LevenbergMarquardtOptimizer : public NonlinearOptimizer {
+
+public:
+
+  typedef boost::shared_ptr<LevenbergMarquardtOptimizer> shared_ptr;
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
+   */
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)),
+    state_(graph, initialValues, params_), dimensions_(initialValues.dims(*params_.ordering)) {}
+
+  /** Alternative constructor that takes a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering to use instead of a
+   * computed COLAMD ordering.  For convenience this version takes plain
+   * objects instead of shared pointers, but internally copies the objects.
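+   *
+   * A sketch of supplying an explicit ordering (hypothetical graph and
+   * initialValues objects; orderingCOLAMD is the same default the other
+   * constructor computes internally):
+   * \code
+   *   Ordering ordering = *graph.orderingCOLAMD(initialValues);
+   *   LevenbergMarquardtOptimizer optimizer(graph, initialValues, ordering);
+   * \endcode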
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param ordering The variable elimination ordering to use
+   */
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) {
+    params_.ordering = ordering;
+    state_ = LevenbergMarquardtState(graph, initialValues, params_); }
+
+  /** Access the current damping value */
+  double lambda() const { return state_.lambda; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~LevenbergMarquardtOptimizer() {}
+
+  /** Perform a single iteration of optimization.  This updates the variable
+   * assignments and error stored in the optimizer state; the updated
+   * assignments may be retrieved with values().
+   */
+  virtual void iterate();
+
+  /** Access the parameters */
+  const LevenbergMarquardtParams& params() const { return params_; }
+
+  /** Access the last state */
+  const LevenbergMarquardtState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  LevenbergMarquardtParams params_;
+  LevenbergMarquardtState state_;
+  std::vector<size_t> dimensions_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = *graph.orderingCOLAMD(values);
+    return params;
+  }
+};
+
+}
diff --git a/gtsam/nonlinear/Marginals.cpp b/gtsam/nonlinear/Marginals.cpp
new file mode 100644
index 000000000..34d620bc2
--- /dev/null
+++ b/gtsam/nonlinear/Marginals.cpp
@@ -0,0 +1,143 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al.
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Marginals.cpp + * @brief + * @author Richard Roberts + * @date May 14, 2012 + */ + +#include +#include +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization) { + + // Compute COLAMD ordering + ordering_ = *graph.orderingCOLAMD(solution); + + // Linearize graph + graph_ = *graph.linearize(solution, ordering_); + + // Store values + values_ = solution; + + // Compute BayesTree + factorization_ = factorization; + if(factorization_ == CHOLESKY) + bayesTree_ = *GaussianMultifrontalSolver(graph_, false).eliminate(); + else if(factorization_ == QR) + bayesTree_ = *GaussianMultifrontalSolver(graph_, true).eliminate(); +} + +/* ************************************************************************* */ +Matrix Marginals::marginalCovariance(Key variable) const { + return marginalInformation(variable).inverse(); +} + +/* ************************************************************************* */ +Matrix Marginals::marginalInformation(Key variable) const { + // Get linear key + Index index = ordering_[variable]; + + // Compute marginal + GaussianFactor::shared_ptr marginalFactor; + if(factorization_ == CHOLESKY) + marginalFactor = bayesTree_.marginalFactor(index, EliminatePreferCholesky); + else if(factorization_ == QR) + marginalFactor = bayesTree_.marginalFactor(index, EliminateQR); + + // Get information matrix (only store upper-right triangle) + if(typeid(*marginalFactor) == typeid(JacobianFactor)) { + JacobianFactor::constABlock A = static_cast(*marginalFactor).getA(); + return A.transpose() * A; // Compute A'A + } else if(typeid(*marginalFactor) == typeid(HessianFactor)) { + const HessianFactor& hessian = static_cast(*marginalFactor); + const size_t dim = hessian.getDim(hessian.begin()); + return hessian.info().topLeftCorner(dim,dim).selfadjointView(); // Take the non-augmented part of the information matrix + } else { + throw runtime_error("Internal error: Marginals::marginalInformation expected either a JacobianFactor or HessianFactor"); + } +} + +/* ************************************************************************* */ +JointMarginal Marginals::jointMarginalCovariance(const std::vector& variables) const { + JointMarginal info = jointMarginalInformation(variables); + info.fullMatrix_ = info.fullMatrix_.inverse(); + return info; +} + +/* ************************************************************************* */ +JointMarginal Marginals::jointMarginalInformation(const std::vector& variables) const { + + // If 2 variables, we can use the BayesTree::joint function, otherwise we + // have to use sequential elimination. 
+ if(variables.size() == 1) { + Matrix info = marginalInformation(variables.front()); + std::vector dims; + dims.push_back(info.rows()); + Ordering indices; + indices.insert(variables.front(), 0); + return JointMarginal(info, dims, indices); + + } else { + // Convert keys to linear indices + vector indices(variables.size()); + for(size_t i=0; i usedIndices; + for(size_t i=0; i Index_Key; + BOOST_FOREACH(const Index_Key& index_key, usedIndices) { + variableConversion.insert(index_key.second, slot); + ++ slot; + } + } + + // Get dimensions from factor graph + std::vector dims(indices.size(), 0); + for(size_t i = 0; i < variables.size(); ++i) + dims[i] = values_.at(variables[i]).dim(); + + // Get information matrix + Matrix augmentedInfo = jointFG.denseHessian(); + Matrix info = augmentedInfo.topLeftCorner(augmentedInfo.rows()-1, augmentedInfo.cols()-1); + + return JointMarginal(info, dims, variableConversion); + } +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h new file mode 100644 index 000000000..77bccdfd2 --- /dev/null +++ b/gtsam/nonlinear/Marginals.h @@ -0,0 +1,116 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Marginals.h + * @brief A class for computing marginals in a NonlinearFactorGraph + * @author Richard Roberts + * @date May 14, 2012 + */ + +#pragma once + +#include +#include +#include +#include + +namespace gtsam { + +class JointMarginal; + +/** + * A class for computing Gaussian marginals of variables in a NonlinearFactorGraph + */ +class Marginals { + +public: + + /** The linear factorization mode - either CHOLESKY (faster and suitable for most problems) or QR (slower but more numerically stable for poorly-conditioned problems). */ + enum Factorization { + CHOLESKY, + QR + }; + + /** Construct a marginals class. + * @param graph The factor graph defining the full joint density on all variables. + * @param solution The linearization point about which to compute Gaussian marginals (usually the MLE as obtained from a NonlinearOptimizer). + * @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems). + */ + Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY); + + /** Compute the marginal covariance of a single variable */ + Matrix marginalCovariance(Key variable) const; + + /** Compute the marginal information matrix of a single variable. You can + * use LLt(const Matrix&) or RtR(const Matrix&) to obtain the square-root information + * matrix. 
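+ *
+ * A usage sketch (hypothetical graph and result objects, e.g. obtained from
+ * one of the optimizers above):
+ * \code
+ *   Marginals marginals(graph, result, Marginals::CHOLESKY);
+ *   Matrix info = marginals.marginalInformation(Symbol('x',1));
+ *   Matrix cov  = marginals.marginalCovariance(Symbol('x',1)); // equals info.inverse()
+ * \endcode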
*/ + Matrix marginalInformation(Key variable) const; + + /** Compute the joint marginal covariance of several variables */ + JointMarginal jointMarginalCovariance(const std::vector& variables) const; + + /** Compute the joint marginal information of several variables */ + JointMarginal jointMarginalInformation(const std::vector& variables) const; + +protected: + + GaussianFactorGraph graph_; + Values values_; + Ordering ordering_; + Factorization factorization_; + GaussianBayesTree bayesTree_; +}; + +/** + * A class to store and access a joint marginal, returned from Marginals::jointMarginalCovariance and Marginals::jointMarginalInformation + */ +class JointMarginal { + +protected: + typedef SymmetricBlockView BlockView; + +public: + /** A block view of the joint marginal - this stores a reference to the + * JointMarginal object, so the JointMarginal object must be kept in scope + * while this block view is needed, otherwise assign this block object to a + * Matrix to store it. + */ + typedef BlockView::constBlock Block; + + /** Access a block, corresponding to a pair of variables, of the joint + * marginal. Each block is accessed by its "vertical position", + * corresponding to the variable with nonlinear Key \c iVariable and + * "horizontal position", corresponding to the variable with nonlinear Key + * \c jVariable. + * + * For example, if we have the joint marginal on a 2D pose "x3" and a 2D + * landmark "l2", then jointMarginal(Symbol('x',3), Symbol('l',2)) will + * return the 3x2 block of the joint covariance matrix corresponding to x3 + * and l2. + * @param iVariable The nonlinear Key specifying the "vertical position" of the requested block + * @param jVariable The nonlinear Key specifying the "horizontal position" of the requested block + */ + Block operator()(Key iVariable, Key jVariable) const { + return blockView_(indices_[iVariable], indices_[jVariable]); } + +protected: + Matrix fullMatrix_; + BlockView blockView_; + Ordering indices_; + + JointMarginal(const Matrix& fullMatrix, const std::vector& dims, const Ordering& indices) : + fullMatrix_(fullMatrix), blockView_(fullMatrix_, dims.begin(), dims.end()), indices_(indices) {} + + friend class Marginals; +}; + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/NonlinearOptimization-inl.h b/gtsam/nonlinear/NonlinearOptimization-inl.h deleted file mode 100644 index 361de59e2..000000000 --- a/gtsam/nonlinear/NonlinearOptimization-inl.h +++ /dev/null @@ -1,134 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimization-inl.h - * @date Oct 17, 2010 - * @author Kai Ni - * @brief Easy interfaces for NonlinearOptimizer - */ - -#pragma once - -#include -#include - -#if ENABLE_SPCG -#include -#endif - -#include - -using namespace std; - -namespace gtsam { - - /** - * The Elimination solver - */ - template - Values optimizeSequential(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters, bool useLM) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - - // Create an nonlinear Optimizer that uses a Sequential Solver - typedef NonlinearOptimizer Optimizer; - Optimizer optimizer(boost::make_shared(graph), - boost::make_shared(initialEstimate), ordering, - boost::make_shared(parameters)); - - // Now optimize using either LM or GN methods. - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } - - /** - * The multifrontal solver - */ - template - Values optimizeMultiFrontal(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters, bool useLM) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate); - - // Create an nonlinear Optimizer that uses a Multifrontal Solver - typedef NonlinearOptimizer Optimizer; - Optimizer optimizer(boost::make_shared(graph), - boost::make_shared(initialEstimate), ordering, - boost::make_shared(parameters)); - - // now optimize using either LM or GN methods - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } - -#if ENABLE_SPCG - /** - * The sparse preconditioned conjugate gradient solver - */ - template - Values optimizeSPCG(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(), - bool useLM = true) { - - // initial optimization state is the same in both cases tested - typedef SubgraphSolver Solver; - typedef boost::shared_ptr shared_Solver; - typedef NonlinearOptimizer SPCGOptimizer; - shared_Solver solver = boost::make_shared( - graph, initialEstimate, IterativeOptimizationParameters()); - SPCGOptimizer optimizer( - boost::make_shared(graph), - boost::make_shared(initialEstimate), - solver->ordering(), - solver, - boost::make_shared(parameters)); - - // choose nonlinear optimization method - if (useLM) - return *optimizer.levenbergMarquardt().values(); - else - return *optimizer.gaussNewton().values(); - } -#endif - - /** - * optimization that returns the values - */ - template - Values optimize(const G& graph, const Values& initialEstimate, const NonlinearOptimizationParameters& parameters, - const LinearSolver& solver, - const NonlinearOptimizationMethod& nonlinear_method) { - switch (solver) { - case SEQUENTIAL: - return optimizeSequential(graph, initialEstimate, parameters, - nonlinear_method == LM); - case MULTIFRONTAL: - return optimizeMultiFrontal(graph, initialEstimate, parameters, - nonlinear_method == LM); -#if ENABLE_SPCG - case SPCG: -// return optimizeSPCG(graph, initialEstimate, parameters, -// nonlinear_method == LM); - throw runtime_error("optimize: SPCG not supported yet due to the specific pose constraint"); -#endif - } - throw 
runtime_error("optimize: undefined solver"); - } - -} //namespace gtsam diff --git a/gtsam/nonlinear/NonlinearOptimization.h b/gtsam/nonlinear/NonlinearOptimization.h deleted file mode 100644 index 1d134688e..000000000 --- a/gtsam/nonlinear/NonlinearOptimization.h +++ /dev/null @@ -1,56 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimization.h - * @date Oct 14, 2010 - * @author Kai Ni - * @brief Easy interfaces for NonlinearOptimizer - */ - -#pragma once - -#include -#include - -namespace gtsam { - - /** - * all the nonlinear optimization methods - */ - typedef enum { - LM, // Levenberg Marquardt - GN // Gaussian-Newton - } NonlinearOptimizationMethod; - - /** - * all the linear solver types - */ - typedef enum { - SEQUENTIAL, // Sequential elimination - MULTIFRONTAL, // Multi-frontal elimination -#if ENABLE_SPCG - SPCG, // Subgraph Preconditioned Conjugate Gradient -#endif - } LinearSolver; - - /** - * optimization that returns the values - */ - template - Values optimize(const G& graph, const Values& initialEstimate, - const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(), - const LinearSolver& solver = MULTIFRONTAL, - const NonlinearOptimizationMethod& nonlinear_method = LM); - -} - -#include diff --git a/gtsam/nonlinear/NonlinearOptimizationParameters.cpp b/gtsam/nonlinear/NonlinearOptimizationParameters.cpp deleted file mode 100644 index e80c3b59c..000000000 --- a/gtsam/nonlinear/NonlinearOptimizationParameters.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizationParameters.cpp - * @date Jan 28, 2012 - * @author Alex Cunningham - * @brief Implements parameters structure - */ - -#include - -#include - -#include - -namespace gtsam { - -using namespace std; - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters() : - absDecrease_(1e-6), relDecrease_(1e-6), sumError_(0.0), maxIterations_( - 100), lambda_(1e-5), lambdaFactor_(10.0), verbosity_(SILENT), lambdaMode_( - BOUNDED), useQR_(false) { -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters(double absDecrease, double relDecrease, - double sumError, int iIters, double lambda, - double lambdaFactor, verbosityLevel v, - LambdaMode lambdaMode, bool useQR) : - absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_( - sumError), maxIterations_(iIters), lambda_(lambda), lambdaFactor_( - lambdaFactor), verbosity_(v), lambdaMode_(lambdaMode), useQR_(useQR) { -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::NonlinearOptimizationParameters( - const NonlinearOptimizationParameters ¶meters) : - absDecrease_(parameters.absDecrease_), relDecrease_( - parameters.relDecrease_), sumError_(parameters.sumError_), maxIterations_( - parameters.maxIterations_), lambda_(parameters.lambda_), lambdaFactor_( - parameters.lambdaFactor_), verbosity_(parameters.verbosity_), lambdaMode_( - parameters.lambdaMode_), useQR_(parameters.useQR_) { -} - -/* ************************************************************************* */ -void NonlinearOptimizationParameters::print(const std::string& s) const { - cout << "NonlinearOptimizationParameters " << s << endl; - cout << "absolute decrease threshold: " << absDecrease_ << endl; - cout << "relative decrease threshold: " << relDecrease_ << endl; - cout << " error sum threshold: " << sumError_ << endl; - cout << " maximum nr. 
of iterations: " << maxIterations_ << endl; - cout << " initial lambda value: " << lambda_ << endl; - cout << " factor to multiply lambda: " << lambdaFactor_ << endl; - cout << " verbosity level: " << verbosity_ << endl; - cout << " lambda mode: " << lambdaMode_ << endl; - cout << " use QR: " << useQR_ << endl; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newLambda_(double lambda) const { - shared_ptr ptr( - boost::make_shared < NonlinearOptimizationParameters > (*this)); - ptr->lambda_ = lambda; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newVerbosity(verbosityLevel verbosity) { - shared_ptr ptr(boost::make_shared()); - ptr->verbosity_ = verbosity; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newMaxIterations(int maxIterations) { - shared_ptr ptr(boost::make_shared()); - ptr->maxIterations_ = maxIterations; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newLambda(double lambda) { - shared_ptr ptr(boost::make_shared()); - ptr->lambda_ = lambda; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newDecreaseThresholds(double absDecrease, - double relDecrease) { - shared_ptr ptr(boost::make_shared()); - ptr->absDecrease_ = absDecrease; - ptr->relDecrease_ = relDecrease; - return ptr; -} - -/* ************************************************************************* */ -NonlinearOptimizationParameters::shared_ptr -NonlinearOptimizationParameters::newFactorization(bool useQR) { - shared_ptr ptr(boost::make_shared()); - ptr->useQR_ = useQR; - return ptr; -} - -} // \namespace gtsam diff --git a/gtsam/nonlinear/NonlinearOptimizationParameters.h b/gtsam/nonlinear/NonlinearOptimizationParameters.h deleted file mode 100644 index 07e01ea58..000000000 --- a/gtsam/nonlinear/NonlinearOptimizationParameters.h +++ /dev/null @@ -1,138 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizationParameters.h - * @date Oct 14, 2010 - * @author Kai Ni - * @brief structure to store parameters for nonlinear optimization - */ - -#pragma once - -#include -#include -#include - -namespace gtsam { - - /** - * a container for all related parameters - * \nosubgrouping - */ - struct NonlinearOptimizationParameters { - - typedef boost::shared_ptr shared_ptr; - typedef NonlinearOptimizationParameters This; - - double absDecrease_; ///< threshold for the absolute decrease per iteration - - /** - * Relative decrease threshold, where relative error = (new-current)/current. - * This can be set to 0 if there is a possibility for negative error values. 
- */ - double relDecrease_; ///< threshold for the relative decrease per iteration - - double sumError_; ///< threshold for the sum of error - size_t maxIterations_; ///< maximum number of iterations - double lambda_; ///< starting lambda value - double lambdaFactor_; ///< factor by which lambda is multiplied - - /// verbosity levels - typedef enum { - SILENT, - ERROR, - LAMBDA, - TRYLAMBDA, - VALUES, - DELTA, - TRYCONFIG, - TRYDELTA, - LINEAR, - DAMPED - } verbosityLevel; - verbosityLevel verbosity_; ///< verbosity - - /// trust-region method mode - typedef enum { - FAST, BOUNDED, CAUTIOUS - } LambdaMode; - LambdaMode lambdaMode_; /// - void serialize(ARCHIVE & ar, const unsigned int version) - { - ar & BOOST_SERIALIZATION_NVP(absDecrease_); - ar & BOOST_SERIALIZATION_NVP(relDecrease_); - ar & BOOST_SERIALIZATION_NVP(sumError_); - ar & BOOST_SERIALIZATION_NVP(maxIterations_); - ar & BOOST_SERIALIZATION_NVP(lambda_); - ar & BOOST_SERIALIZATION_NVP(lambdaFactor_); - ar & BOOST_SERIALIZATION_NVP(verbosity_); - ar & BOOST_SERIALIZATION_NVP(lambdaMode_); - ar & BOOST_SERIALIZATION_NVP(useQR_); - } - }; -} diff --git a/gtsam/nonlinear/NonlinearOptimizer-inl.h b/gtsam/nonlinear/NonlinearOptimizer-inl.h deleted file mode 100644 index f788eac2a..000000000 --- a/gtsam/nonlinear/NonlinearOptimizer-inl.h +++ /dev/null @@ -1,356 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file NonlinearOptimizer-inl.h - * This is a template definition file, include it where needed (only!) - * so that the appropriate code is generated and link errors avoided. 
- * @brief: Encapsulates nonlinear optimization state - * @author Frank Dellaert - * @date Sep 7, 2009 - */ - -#pragma once - -#include -#include -#include -#include - -using namespace std; - -namespace gtsam { - - /* ************************************************************************* */ - template - NonlinearOptimizer::NonlinearOptimizer(shared_graph graph, - shared_values values, shared_ordering ordering, shared_parameters parameters) : - graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering), - parameters_(parameters), iterations_(0), - dimensions_(new vector(values->dims(*ordering))), - structure_(new VariableIndex(*graph->symbolic(*ordering))) { - if (!graph) throw std::invalid_argument( - "NonlinearOptimizer constructor: graph = NULL"); - if (!values) throw std::invalid_argument( - "NonlinearOptimizer constructor: values = NULL"); - if (!ordering) throw std::invalid_argument( - "NonlinearOptimizer constructor: ordering = NULL"); - } - - /* ************************************************************************* */ - // FIXME: remove this constructor - template - NonlinearOptimizer::NonlinearOptimizer(shared_graph graph, - shared_values values, shared_ordering ordering, - shared_solver spcg_solver, shared_parameters parameters) : - graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering), - parameters_(parameters), iterations_(0), - dimensions_(new vector(values->dims(*ordering))), - spcg_solver_(spcg_solver) { - if (!graph) throw std::invalid_argument( - "NonlinearOptimizer constructor: graph = NULL"); - if (!values) throw std::invalid_argument( - "NonlinearOptimizer constructor: values = NULL"); - if (!spcg_solver) throw std::invalid_argument( - "NonlinearOptimizer constructor: spcg_solver = NULL"); - } - - /* ************************************************************************* */ - // One iteration of Gauss Newton - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::iterate() const { - - Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - - // FIXME: get rid of spcg solver - shared_solver solver; - if (spcg_solver_) { // special case for SPCG - spcg_solver_->replaceFactors(linearize()); - solver = spcg_solver_; - } else { // normal case - solver = createSolver(); - } - - VectorValues delta = *solver->optimize(); - - // maybe show output - if (verbosity >= Parameters::DELTA) delta.print("delta"); - - // take old values and update it - shared_values newValues(new Values(values_->retract(delta, *ordering_))); - - // maybe show output - if (verbosity >= Parameters::VALUES) newValues->print("newValues"); - - NonlinearOptimizer newOptimizer = newValues_(newValues); - ++ newOptimizer.iterations_; - - if (verbosity >= Parameters::ERROR) cout << "error: " << newOptimizer.error_ << endl; - return newOptimizer; - } - - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::gaussNewton() const { - static W writer(error_); - - if (error_ < parameters_->sumError_ ) { - if ( parameters_->verbosity_ >= Parameters::ERROR) - cout << "Exiting, as error = " << error_ - << " < sumError (" << parameters_->sumError_ << ")" << endl; - return *this; - } - - // linearize, solve, update - NonlinearOptimizer next = iterate(); - - writer.write(next.error_); - - // check convergence - bool converged = gtsam::check_convergence(*parameters_, error_, next.error_); - - // return 
converged state or iterate - if (converged) return next; - else return next.gaussNewton(); - } - - /* ************************************************************************* */ - // Iteratively try to do tempered Gauss-Newton steps until we succeed. - // Form damped system with given lambda, and return a new, more optimistic - // optimizer if error decreased or iterate with a larger lambda if not. - // TODO: in theory we can't infinitely recurse, but maybe we should put a max. - // Reminder: the parameters are Graph type $G$, Values class type $T$, - // linear system class $L$, the non linear solver type $S$, and the writer type $W$ - template - NonlinearOptimizer NonlinearOptimizer::try_lambda(const L& linearSystem) { - - const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ; - const double factor = parameters_->lambdaFactor_ ; - double lambda = parameters_->lambda_ ; - - if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED."); - - double next_error = error_; - shared_values next_values = values_; - - // Keep increasing lambda until we make make progress - while(true) { - if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl; - - // add prior-factors - // TODO: replace this dampening with a backsubstitution approach - typename L::shared_ptr dampedSystem(new L(linearSystem)); - { - double sigma = 1.0 / sqrt(lambda); - dampedSystem->reserve(dampedSystem->size() + dimensions_->size()); - // for each of the variables, add a prior - for(Index j=0; jsize(); ++j) { - size_t dim = (*dimensions_)[j]; - Matrix A = eye(dim); - Vector b = zero(dim); - SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma); - typename L::sharedFactor prior(new JacobianFactor(j, A, b, model)); - dampedSystem->push_back(prior); - } - } - if (verbosity >= Parameters::DAMPED) dampedSystem->print("damped"); - - // Create a new solver using the damped linear system - // FIXME: remove spcg specific code - if (spcg_solver_) spcg_solver_->replaceFactors(dampedSystem); - shared_solver solver = (spcg_solver_) ? spcg_solver_ : shared_solver( - new S(dampedSystem, structure_, parameters_->useQR_)); - - // Try solving - try { - VectorValues delta = *solver->optimize(); - if (verbosity >= Parameters::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl; - if (verbosity >= Parameters::TRYDELTA) delta.print("delta"); - - // update values - shared_values newValues(new Values(values_->retract(delta, *ordering_))); - - // create new optimization state with more adventurous lambda - double error = graph_->error(*newValues); - - if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl; - - if( error <= error_ ) { - next_values = newValues; - next_error = error; - lambda /= factor; - break; - } - else { - // Either we're not cautious, or the same lambda was worse than the current error. - // The more adventurous lambda was worse too, so make lambda more conservative - // and keep the same values. 
- if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { - if(verbosity >= Parameters::ERROR) - cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; - break; - } else { - lambda *= factor; - } - } - } catch(const NegativeMatrixException& e) { - if(verbosity >= Parameters::LAMBDA) - cout << "Negative matrix, increasing lambda" << endl; - // Either we're not cautious, or the same lambda was worse than the current error. - // The more adventurous lambda was worse too, so make lambda more conservative - // and keep the same values. - if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) { - if(verbosity >= Parameters::ERROR) - cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl; - break; - } else { - lambda *= factor; - } - } catch(...) { - throw; - } - } // end while - - return newValuesErrorLambda_(next_values, next_error, lambda); - } - - /* ************************************************************************* */ - // One iteration of Levenberg Marquardt - // Reminder: the parameters are Graph type $G$, Values class type $T$, - // linear system class $L$, the non linear solver type $S$, and the writer type $W$ - template - NonlinearOptimizer NonlinearOptimizer::iterateLM() { - - const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - const double lambda = parameters_->lambda_ ; - - // show output - if (verbosity >= Parameters::VALUES) values_->print("values"); - if (verbosity >= Parameters::ERROR) cout << "error: " << error_ << endl; - if (verbosity >= Parameters::LAMBDA) cout << "lambda = " << lambda << endl; - - // linearize all factors once - boost::shared_ptr linear(new L(*graph_->linearize(*values_, *ordering_))); - - if (verbosity >= Parameters::LINEAR) linear->print("linear"); - - // try lambda steps with successively larger lambda until we achieve descent - if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl; - - return try_lambda(*linear); - } - - /* ************************************************************************* */ - // Reminder: the parameters are Graph type $G$, Values class type $T$, - // linear system class $L$, the non linear solver type $S$, and the writer type $W$ - template - NonlinearOptimizer NonlinearOptimizer::levenbergMarquardt() { - - // Initialize - bool converged = false; - const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ; - - // check if we're already close enough - if (error_ < parameters_->sumError_) { - if ( verbosity >= Parameters::ERROR ) - cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl; - return *this; - } - - // for the case that maxIterations_ = 0 - iterations_ = 1; - if (iterations_ >= parameters_->maxIterations_) - return *this; - - // Iterative loop that implements Levenberg-Marquardt - while (true) { - double previous_error = error_; - // do one iteration of LM - NonlinearOptimizer next = iterateLM(); - error_ = next.error_; - values_ = next.values_; - parameters_ = next.parameters_; - iterations_ = next.iterations_; - - // check convergence - // TODO: move convergence checks here and incorporate in verbosity levels - // TODO: build into iterations somehow as an instance variable - converged = gtsam::check_convergence(*parameters_, previous_error, error_); - - if(iterations_ >= parameters_->maxIterations_ || converged == true) { - if (verbosity >= Parameters::VALUES) values_->print("final values"); - if 
(verbosity >= Parameters::ERROR && iterations_ >= parameters_->maxIterations_) cout << "Terminating because reached maximum iterations" << endl; - if (verbosity >= Parameters::ERROR) cout << "final error: " << error_ << endl; - if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << lambda() << endl; - return *this; - } - iterations_++; - } - } - - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::iterateDogLeg() { - - S solver(*graph_->linearize(*values_, *ordering_), parameters_->useQR_); - DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate( - parameters_->lambda_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *solver.eliminate(), - *graph_, *values_, *ordering_, error_, parameters_->verbosity_ > Parameters::ERROR); - shared_values newValues(new Values(values_->retract(result.dx_d, *ordering_))); - return newValuesErrorLambda_(newValues, result.f_error, result.Delta); - } - - /* ************************************************************************* */ - template - NonlinearOptimizer NonlinearOptimizer::dogLeg() { - static W writer(error_); - - // check if we're already close enough - if (error_ < parameters_->sumError_) { - if ( parameters_->verbosity_ >= Parameters::ERROR ) - cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl; - return *this; - } - - // for the case that maxIterations_ = 0 - iterations_ = 1; - if (iterations_ >= parameters_->maxIterations_) - return *this; - - // Iterative loop that runs Dog Leg - while (true) { - double previous_error = error_; - // do one iteration of LM - NonlinearOptimizer next = iterateDogLeg(); - writer.write(next.error_); - error_ = next.error_; - values_ = next.values_; - parameters_ = next.parameters_; - - // check convergence - // TODO: move convergence checks here and incorporate in verbosity levels - // TODO: build into iterations somehow as an instance variable - bool converged = gtsam::check_convergence(*parameters_, previous_error, error_); - - if(iterations_ >= parameters_->maxIterations_ || converged == true) { - if (parameters_->verbosity_ >= Parameters::VALUES) values_->print("final values"); - if (parameters_->verbosity_ >= Parameters::ERROR) cout << "final error: " << error_ << endl; - if (parameters_->verbosity_ >= Parameters::LAMBDA) cout << "final Delta (called lambda) = " << lambda() << endl; - return *this; - } - iterations_++; - } - - } - -} diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 4d7d76bfc..76d1fe573 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -24,22 +24,50 @@ using namespace std; namespace gtsam { +/* ************************************************************************* */ +void NonlinearOptimizer::defaultOptimize() { -bool check_convergence ( - const NonlinearOptimizationParameters ¶meters, - double currentError, double newError) { - return check_convergence(parameters.relDecrease_, - parameters.absDecrease_, - parameters.sumError_, - currentError, newError, - parameters.verbosity_) ; + const NonlinearOptimizerParams& params = this->_params(); + double currentError = this->error(); + + // check if we're already close enough + if(currentError <= params.errorTol) { + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + cout << "Exiting, as error = " << currentError << " < " << params.errorTol << endl; + return; + } + + // Maybe show output + if 
(params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("Initial values"); + if (params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << currentError << endl; + + // Return if we already have too many iterations + if(this->iterations() >= params.maxIterations) + return; + + // Iterative loop + do { + // Do next iteration + currentError = this->error(); + this->iterate(); + + // Maybe show output + if(params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("newValues"); + if(params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << this->error() << endl; + } while(this->iterations() < params.maxIterations && + !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, + params.errorTol, currentError, this->error(), params.verbosity)); + + // Printing if verbose + if (params.verbosity >= NonlinearOptimizerParams::ERROR && + this->iterations() >= params.maxIterations) + cout << "Terminating because reached maximum iterations" << endl; } -bool check_convergence( - double relativeErrorTreshold, - double absoluteErrorTreshold, - double errorThreshold, - double currentError, double newError, int verbosity) { +/* ************************************************************************* */ +bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold, + double errorThreshold, double currentError, double newError, + NonlinearOptimizerParams::Verbosity verbosity) { if ( verbosity >= 2 ) { if ( newError <= errorThreshold ) @@ -53,7 +81,7 @@ bool check_convergence( // check if diverges double absoluteDecrease = currentError - newError; if (verbosity >= 2) { - if (absoluteDecrease < absoluteErrorTreshold) + if (absoluteDecrease <= absoluteErrorTreshold) cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " < " << absoluteErrorTreshold << endl; else cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " >= " << absoluteErrorTreshold << endl; @@ -62,13 +90,13 @@ bool check_convergence( // calculate relative error decrease and update currentError double relativeDecrease = absoluteDecrease / currentError; if (verbosity >= 2) { - if (relativeDecrease < relativeErrorTreshold) + if (relativeDecrease <= relativeErrorTreshold) cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " < " << relativeErrorTreshold << endl; else cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " >= " << relativeErrorTreshold << endl; } - bool converged = (relativeErrorTreshold && (relativeDecrease < relativeErrorTreshold)) - || (absoluteDecrease < absoluteErrorTreshold); + bool converged = (relativeErrorTreshold && (relativeDecrease <= relativeErrorTreshold)) + || (absoluteDecrease <= absoluteErrorTreshold); if (verbosity >= 1 && converged) { if(absoluteDecrease >= 0.0) cout << "converged" << endl; diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 4e41649e6..388058ac0 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -11,449 +11,229 @@ /** * @file NonlinearOptimizer.h - * @brief Encapsulates nonlinear optimization state - * @author Frank Dellaert + * @brief Base class and parameters for nonlinear optimization algorithms + * @author Richard Roberts * @date Sep 7, 2009 */ #pragma once -#include #include -#include namespace gtsam { -class NullOptimizerWriter { +class NonlinearOptimizer; + +/** The common parameters for Nonlinear optimizers. 
Most optimizers
+ * deriving from NonlinearOptimizer also subclass the parameters.
+ */
+class NonlinearOptimizerParams {
public:
-  NullOptimizerWriter(double error) {} ///Contructor
-  virtual ~NullOptimizerWriter() {}
-  virtual void write(double error) {} ///Capturing the values of the parameters after the optimization
-}; ///
+  /** See NonlinearOptimizerParams::verbosity */
+  enum Verbosity {
+    SILENT,
+    ERROR,
+    VALUES,
+    DELTA,
+    LINEAR
+  };
+
+  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
+  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
+  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
+  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
+  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)
+
+  NonlinearOptimizerParams() :
+    maxIterations(100), relativeErrorTol(1e-5), absoluteErrorTol(1e-5),
+    errorTol(0.0), verbosity(SILENT) {}
+
+  virtual void print(const std::string& str = "") const {
+    std::cout << str << "\n";
+    std::cout << "relative decrease threshold: " << relativeErrorTol << "\n";
+    std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
+    std::cout << " total error threshold: " << errorTol << "\n";
+    std::cout << " maximum iterations: " << maxIterations << "\n";
+    std::cout << " verbosity level: " << verbosity << std::endl;
+  }
+
+  virtual ~NonlinearOptimizerParams() {}
+};
+
/**
- * The class NonlinearOptimizer encapsulates an optimization state.
- * Typically it is instantiated with a NonlinearFactorGraph and initial values
- * and then one of the optimization routines is called. These iterate
- * until convergence. All methods are functional and return a new state.
- *
- * The class is parameterized by the Graph type $G$, Values class type $Values$,
- * linear system class $L$, the non linear solver type $S$, and the writer type $W$
- *
- * The values class type $Values$ is in order to be able to optimize over non-vector values structures.
- *
- * A nonlinear system solver $S$
- * Concept NonLinearSolver implements
- *   linearize: G * Values -> L
- *   solve : L -> Values
- *
- * The writer $W$ generates output to disk or the screen.
- *
- * For example, in a 2D case, $G$ can be pose2SLAM::Graph, $Values$ can be Pose2Values,
- * $L$ can be GaussianFactorGraph and $S$ can be Factorization.
- * The solver class has two main functions: linearize and optimize. The first one
- * linearizes the nonlinear cost function around the current estimate, and the second
- * one optimizes the linearized system using various methods.
- *
- * To use the optimizer in code, include in your cpp file
- * \nosubgrouping
+ * Base class for a nonlinear optimization state, including the current estimate
+ * of the variable values, error, and number of iterations. Optimizers derived
+ * from NonlinearOptimizer usually also define a derived state class containing
+ * additional state specific to the algorithm (for example, Dogleg state
+ * contains the current trust region radius).
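+ *
+ * A minimal sketch of what such a derived state might look like (illustrative
+ * only; the class and member names here are hypothetical, not part of this
+ * change):
+ * \code
+ * class DoglegState : public NonlinearOptimizerState {
+ * public:
+ *   double Delta; // current trust region radius, updated by each iterate()
+ * };
+ * \endcode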
*/ -template -class NonlinearOptimizer { +class NonlinearOptimizerState { public: - // For performance reasons in recursion, we store values in a shared_ptr - typedef boost::shared_ptr shared_values; ///Prevent memory leaks in Values - typedef boost::shared_ptr shared_graph; /// Prevent memory leaks in Graph - typedef boost::shared_ptr shared_linear; /// Not sure - typedef boost::shared_ptr shared_ordering; ///ordering parameters - typedef boost::shared_ptr shared_solver; /// Solver - typedef NonlinearOptimizationParameters Parameters; ///These take the parameters defined in NonLinearOptimizationParameters.h - typedef boost::shared_ptr shared_parameters ; /// - typedef boost::shared_ptr shared_structure; // TODO: make this const + /** The current estimate of the variable values. */ + Values values; -private: + /** The factor graph error on the current values. */ + double error; - typedef NonlinearOptimizer This; - typedef boost::shared_ptr > shared_dimensions; + /** The number of optimization iterations performed. */ + unsigned int iterations; - /// keep a reference to const version of the graph - /// These normally do not change - const shared_graph graph_; + NonlinearOptimizerState() {} - // keep a values structure and its error - // These typically change once per iteration (in a functional way) - shared_values values_; + /** Virtual destructor */ + virtual ~NonlinearOptimizerState() {} - // current error for this state - double error_; +protected: + NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) : + values(values), error(graph.error(values)), iterations(iterations) {} - // the variable ordering - const shared_ordering ordering_; + NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) : + values(values), error(error), iterations(iterations) {} - // storage for parameters, lambda, thresholds, etc. - shared_parameters parameters_; - - // for performance track - size_t iterations_; - - // The dimensions of each linearized variable - const shared_dimensions dimensions_; - - // storage of structural components that don't change between iterations - // used at creation of solvers in each iteration - // TODO: make this structure component specific to solver type - const shared_structure structure_; - - // solver used only for SPCG - // FIXME: remove this! 
- shared_solver spcg_solver_; - - /// @name Advanced Constructors - /// @{ - - /** - * Constructor that does not do any computation - */ - NonlinearOptimizer(shared_graph graph, shared_values values, const double error, - shared_ordering ordering, shared_parameters parameters, shared_dimensions dimensions, - size_t iterations, shared_structure structure) - : graph_(graph), values_(values), error_(error), ordering_(ordering), - parameters_(parameters), iterations_(iterations), dimensions_(dimensions), - structure_(structure) {} - - /** constructors to replace specific components */ - - ///TODO: comment - This newValues_(shared_values newValues) const { - return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), - ordering_, parameters_, dimensions_, iterations_, structure_); } - - ///TODO: comment - This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const { - return NonlinearOptimizer(graph_, newValues, newError, ordering_, - parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newIterations_(int iterations) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters_, dimensions_, - iterations, structure_); } - - ///TODO: comment - This newLambda_(double newLambda) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, - parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newValuesLambda_(shared_values newValues, double newLambda) const { - return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues), - ordering_, parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); } - - ///TODO: comment - This newParameters_(shared_parameters parameters) const { - return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters, dimensions_, - iterations_, structure_); } - - /// @} - -public: - - /// @name Standard Constructors - /// @{ - - /** - * Constructor that evaluates new error - */ - NonlinearOptimizer(shared_graph graph, - shared_values values, - shared_ordering ordering, - shared_parameters parameters = boost::make_shared()); - - /** - * Constructor that takes a solver directly - only useful for SPCG - * FIXME: REMOVE THIS FUNCTION! 
- */ - NonlinearOptimizer(shared_graph graph, - shared_values values, - shared_ordering ordering, - shared_solver spcg_solver, - shared_parameters parameters = boost::make_shared()); - - /** - * Copy constructor - */ - NonlinearOptimizer(const NonlinearOptimizer &optimizer) : - graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_), - ordering_(optimizer.ordering_), parameters_(optimizer.parameters_), - iterations_(optimizer.iterations_), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {} - - // access to member variables - - /// @} - /// @name Standard Interface - /// @{ - - /// print - void print(const std::string& s="") const { - cout << "NonlinearOptimizer " << s << endl; - cout << " current error: " << error_ << endl; - cout << "iterations count: " << iterations_ << endl; - } - - /** Return current error */ - double error() const { return error_; } - - /** Return current lambda */ - double lambda() const { return parameters_->lambda_; } - - /** Return the values */ - shared_values values() const { return values_; } - - /** Return the graph */ - shared_graph graph() const { return graph_; } - - /** Return the itertions */ - size_t iterations() const { return iterations_; } - - /** Return the ordering */ - shared_ordering ordering() const { return ordering_; } - - /** Return the parameters */ - shared_parameters parameters() const { return parameters_; } - - /** Return the structure variable (variable index) */ - shared_structure structure() const { return structure_; } - - /** - * Return a linearized graph at the current graph/values/ordering - */ - shared_linear linearize() const { - return shared_linear(new L(*graph_->linearize(*values_, *ordering_))); - } - - /** - * create a solver around the current graph/values - * NOTE: this will actually solve a linear system - */ - shared_solver createSolver() const { - return shared_solver(new GS(linearize(), structure_, parameters_->useQR_)); - } - - /** - * Return mean and covariance on a single variable - */ - Matrix marginalCovariance(Key j) const { - return createSolver()->marginalCovariance((*ordering_)[j]); - } - - /** - * linearize and optimize - * This returns an VectorValues, i.e., vectors in tangent space of Values - */ - VectorValues linearizeAndOptimizeForDelta() const { - return *createSolver()->optimize(); - } - - /** - * Do one Gauss-Newton iteration and return next state - * suggested interface - */ - NonlinearOptimizer iterate() const; - - /// - ///Optimize a solution for a non linear factor graph - ///param relativeTreshold - ///@param absoluteTreshold - ///@param verbosity Integer specifying how much output to provide - /// - - // suggested interface - NonlinearOptimizer gaussNewton() const; - - /// @} - /// @name Advanced Interface - /// @{ - - /** Recursively try to do tempered Gauss-Newton steps until we succeed */ - NonlinearOptimizer try_lambda(const L& linear); - - /** - * One iteration of Levenberg Marquardt - */ - NonlinearOptimizer iterateLM(); - - /// - ///Optimize using Levenberg-Marquardt. Really Levenberg's - ///algorithm at this moment, as we just add I*\lambda to Hessian - ///H'H. The probabilistic explanation is very simple: every - ///variable gets an extra Gaussian prior that biases staying at - ///current value, with variance 1/lambda. This is done very easily - ///(but perhaps wastefully) by adding a prior factor for each of - ///the variables, after linearization. 
- /// - ///@param relativeThreshold - ///@param absoluteThreshold - ///@param verbosity Integer specifying how much output to provide - ///@param lambdaFactor Factor by which to decrease/increase lambda - /// - NonlinearOptimizer levenbergMarquardt(); - - /** - * One iteration of the dog leg algorithm - */ - NonlinearOptimizer iterateDogLeg(); - - /** - * Optimize using the Dog Leg algorithm - */ - NonlinearOptimizer dogLeg(); - - // static interfaces to LM, Dog leg, and GN optimization techniques - - ///Static interface to Dog leg optimization using default ordering - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param parameters Optimization parameters - ///@return an optimized values structure - static shared_values optimizeLM(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - //GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.levenbergMarquardt(); - return result.values(); - } - - ///TODO: comment - static shared_values optimizeLM(shared_graph graph, - shared_values values, - Parameters::verbosityLevel verbosity) { - return optimizeLM(graph, values, Parameters::newVerbosity(verbosity)); - } - - /** - * Static interface to LM optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeLM(const G& graph, - const Values& values, - const Parameters parameters = Parameters()) { - return optimizeLM(boost::make_shared(graph), - boost::make_shared(values), - boost::make_shared(parameters)); - } - - ///TODO: comment - static shared_values optimizeLM(const G& graph, - const Values& values, - Parameters::verbosityLevel verbosity) { - return optimizeLM(boost::make_shared(graph), - boost::make_shared(values), - verbosity); - } - - ///Static interface to Dog leg optimization using default ordering - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param parameters Optimization parameters - ///@return an optimized values structure - static shared_values optimizeDogLeg(shared_graph graph, - shared_values values, - shared_parameters parameters = boost::make_shared()) { - - // Use a variable ordering from COLAMD - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - // initial optimization state is the same in both cases tested - //GS solver(*graph->linearize(*values, *ordering)); - - NonlinearOptimizer optimizer(graph, values, ordering, parameters); - NonlinearOptimizer result = optimizer.dogLeg(); - return result.values(); - } - - /// - ///Static interface to Dog leg optimization using default ordering and thresholds - ///@param graph Nonlinear factor graph to optimize - ///@param values Initial values - ///@param verbosity Integer specifying how much output to provide - ///@return an optimized values structure - /// - static shared_values optimizeDogLeg(shared_graph graph, - shared_values values, - Parameters::verbosityLevel verbosity) { - return optimizeDogLeg(graph, values, Parameters::newVerbosity(verbosity)->newLambda_(1.0)); - } - - /** - * Static interface to Dogleg optimization (no shared_ptr arguments) - see above - */ - static shared_values optimizeDogLeg(const G& graph, - const Values& values, - const Parameters parameters = 
Parameters()) {
-    return optimizeDogLeg(boost::make_shared(graph),
-        boost::make_shared(values),
-        boost::make_shared(parameters));
-  }
-
-  ///TODO: comment
-  static shared_values optimizeDogLeg(const G& graph,
-      const Values& values,
-      Parameters::verbosityLevel verbosity) {
-    return optimizeDogLeg(boost::make_shared(graph),
-        boost::make_shared(values),
-        verbosity);
-  }
-
-  ///
-  ///Static interface to GN optimization using default ordering and thresholds
-  ///@param graph Nonlinear factor graph to optimize
-  ///@param values Initial values
-  ///@param verbosity Integer specifying how much output to provide
-  ///@return an optimized values structure
-  ///
-  static shared_values optimizeGN(shared_graph graph,
-      shared_values values,
-      shared_parameters parameters = boost::make_shared()) {
-
-    Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
-    // initial optimization state is the same in both cases tested
-    GS solver(*graph->linearize(*values, *ordering));
-
-    NonlinearOptimizer optimizer(graph, values, ordering, parameters);
-    NonlinearOptimizer result = optimizer.gaussNewton();
-    return result.values();
-  }
-
-  /**
-   * Static interface to GN optimization (no shared_ptr arguments) - see above
-   */
-  static shared_values optimizeGN(const G& graph, const Values& values, const Parameters parameters = Parameters()) {
-    return optimizeGN(boost::make_shared(graph),
-        boost::make_shared(values),
-        boost::make_shared(parameters));
-  }
+  friend class NonlinearOptimizer;
};

/**
- * Check convergence
- */
-bool check_convergence (
-    double relativeErrorTreshold,
-    double absoluteErrorTreshold,
-    double errorThreshold,
-    double currentError, double newError, int verbosity);
+ * This is the abstract interface for classes that can optimize for the
+ * maximum-likelihood estimate of a NonlinearFactorGraph.
+ *
+ * To use a class derived from this interface, construct the class with a
+ * NonlinearFactorGraph and an initial Values variable assignment. Next, call
+ * the optimize() method, which iterates until convergence and returns the
+ * optimized variable assignment; the same estimate may also be retrieved
+ * afterwards with the values() method.
+ *
+ * Note: This class is stateful. optimize() and iterate() update the
+ * optimizer's internal estimate, error, and iteration count in place, so
+ * query error() and iterations() on the same optimizer object after
+ * optimizing.
+ *
+ * Simple and compact example:
+ * \code
+// One-liner to do full optimization and use the result.
+Values result = DoglegOptimizer(graph, initialValues).optimize();
+\endcode
+ *
+ * Example exposing more functionality and details:
+ * \code
+// Create initial optimizer
+DoglegOptimizer optimizer(graph, initialValues);
-///TODO: comment
-bool check_convergence (
-    const NonlinearOptimizationParameters &parameters,
-    double currentError, double newError);
+// Run full optimization until convergence.
+Values result = optimizer.optimize();
+
+// The optimizer object accumulates statistics about the run
+cout << "Converged in " << optimizer.iterations() << " iterations "
+        "with final error " << optimizer.error() << endl;
+
+// Use the results
+useTheResult(result);
+\endcode
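+ *
+ * Example of fine-grain control using iterate() (a sketch only; the stopping
+ * rule shown here is illustrative, not prescribed by this interface):
+ * \code
+LevenbergMarquardtOptimizer optimizer(graph, initialValues);
+for(size_t i = 0; i < 10 && optimizer.error() > 1e-5; ++i) {
+  optimizer.iterate();  // one linearize-solve-update step
+  // draw, print, or adjust state here between iterations
+}
+Values result = optimizer.values();
+\endcode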
+ *
+ * Example of setting parameters before optimization:
+ * \code
+// Each derived optimizer type has its own parameters class, which inherits from NonlinearOptimizerParams
+DoglegParams params;
+params.factorization = DoglegParams::QR;
+params.relativeErrorTol = 1e-3;
+params.absoluteErrorTol = 1e-3;
+
+// Optimize
+Values result = DoglegOptimizer(graph, initialValues, params).optimize();
+\endcode
+ *
+ * This interface also exposes an iterate() method, which performs one
+ * iteration and updates the variable assignment held by the optimizer, which
+ * may then be retrieved with values(). The optimize() method simply calls
+ * iterate() multiple times, until the error changes less than a threshold.
+ * We expose iterate() so that you can easily control what happens between
+ * iterations, such as drawing or printing, moving points from behind the
+ * camera to in front, etc.
+ *
+ * Derived optimizer classes also have additional methods, not in this
+ * abstract interface, for updating algorithm-specific state between
+ * iterations (for example, the trust region size in DoglegOptimizer).
+ *
+ * For more flexibility, since all functions are virtual, you may override them
+ * in your own derived class.
+ */
+class NonlinearOptimizer {
+
+public:
+
+  /** A shared pointer to this class */
+  typedef boost::shared_ptr shared_ptr;
+
+  /// @name Standard interface
+  /// @{
+
+  /** Optimize for the maximum-likelihood estimate, iterating until
+   * convergence and returning the optimized variable assignments, which may
+   * also be retrieved afterwards with values().
+   *
+   * This function simply calls iterate() in a loop, checking for convergence
+   * with checkConvergence(). For fine-grain control over the optimization
+   * process, you may call iterate() and checkConvergence() yourself, and if
+   * needed modify the optimization state between iterations.
+   */
+  virtual const Values& optimize() { defaultOptimize(); return values(); }
+
+  double error() const { return _state().error; }
+
+  unsigned int iterations() const { return _state().iterations; }
+
+  const Values& values() const { return _state().values; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~NonlinearOptimizer() {}
+
+  /** Perform a single iteration, updating the variable assignment held by
+   * the optimizer, which may be retrieved with values().
+   */
+  virtual void iterate() = 0;
+
+  /// @}
+
+protected:
+
+  NonlinearFactorGraph graph_;
+
+  /** A default implementation of the optimization loop, which calls iterate()
+   * until checkConvergence returns true.
+   */
+  void defaultOptimize();
+
+  virtual const NonlinearOptimizerState& _state() const = 0;
+
+  virtual const NonlinearOptimizerParams& _params() const = 0;
+
+  /** Constructor for initial construction of base classes.
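+   * Derived classes are expected to hold their own parameters and
+   * NonlinearOptimizerState objects and expose them through the _params()
+   * and _state() accessors above.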
*/
+  NonlinearOptimizer(const NonlinearFactorGraph& graph) : graph_(graph) {}
+
+};
+
+/** Check whether the relative error decrease is less than relativeErrorTreshold,
+ * the absolute error decrease is less than absoluteErrorTreshold, or
+ * the error itself is less than errorThreshold.
+ */
+bool checkConvergence(double relativeErrorTreshold,
+    double absoluteErrorTreshold, double errorThreshold,
+    double currentError, double newError, NonlinearOptimizerParams::Verbosity verbosity);

} // gtsam
-
-/// @}
-
-#include
diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
new file mode 100644
index 000000000..8f02059fc
--- /dev/null
+++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
@@ -0,0 +1,87 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file SuccessiveLinearizationOptimizer.h
+ * @brief Common parameters for optimizers based on successive linearization
+ * @author Richard Roberts
+ * @date Apr 1, 2012
+ */
+
+#pragma once
+
+#include
+
+namespace gtsam {
+
+class SuccessiveLinearizationParams : public NonlinearOptimizerParams {
+public:
+  /** See SuccessiveLinearizationParams::elimination */
+  enum Elimination {
+    MULTIFRONTAL,
+    SEQUENTIAL
+  };
+
+  /** See SuccessiveLinearizationParams::factorization */
+  enum Factorization {
+    CHOLESKY,
+    LDL,
+    QR,
+  };
+
+  Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL)
+  Factorization factorization; ///< The numerical factorization (default: LDL)
+  boost::optional ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty)
+
+  SuccessiveLinearizationParams() :
+    elimination(MULTIFRONTAL), factorization(LDL) {}
+
+  virtual ~SuccessiveLinearizationParams() {}
+
+  virtual void print(const std::string& str = "") const {
+    NonlinearOptimizerParams::print(str);
+    if(elimination == MULTIFRONTAL)
+      std::cout << " elimination method: MULTIFRONTAL\n";
+    else if(elimination == SEQUENTIAL)
+      std::cout << " elimination method: SEQUENTIAL\n";
+    else
+      std::cout << " elimination method: (invalid)\n";
+
+    if(factorization == CHOLESKY)
+      std::cout << " factorization method: CHOLESKY\n";
+    else if(factorization == LDL)
+      std::cout << " factorization method: LDL\n";
+    else if(factorization == QR)
+      std::cout << " factorization method: QR\n";
+    else
+      std::cout << " factorization method: (invalid)\n";
+
+    if(ordering)
+      std::cout << " ordering: custom\n";
+    else
+      std::cout << " ordering: COLAMD\n";
+
+    std::cout.flush();
+  }
+
+  GaussianFactorGraph::Eliminate getEliminationFunction() const {
+    if(factorization == SuccessiveLinearizationParams::CHOLESKY)
+      return EliminatePreferCholesky;
+    else if(factorization == SuccessiveLinearizationParams::LDL)
+      return EliminatePreferLDL;
+    else if(factorization == SuccessiveLinearizationParams::QR)
+      return EliminateQR;
+    else
+      throw std::runtime_error("Nonlinear optimization parameter \"factorization\" is invalid");
+  }
+};
+
+} /* namespace gtsam */
diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h
index a31e100ef..40e95188e 100644
--- a/gtsam/nonlinear/Values-inl.h
+++ b/gtsam/nonlinear/Values-inl.h
@@ -40,31 +40,6 @@ namespace gtsam {
    ValueCloneAllocator() {}
  };
-#if 0
- /* ************************************************************************* */ - class ValueAutomaticCasting { - Key key_; - const Value& value_; - - public: - ValueAutomaticCasting(Key key, const Value& value) : key_(key), value_(value) {} - - template - class ConvertibleToValue : public ValueType { - }; - - template - operator const ConvertibleToValue& () const { - // Check the type and throw exception if incorrect - if(typeid(ValueType) != typeid(value_)) - throw ValuesIncorrectType(key_, typeid(ValueType), typeid(value_)); - - // We have already checked the type, so do a "blind" static_cast, not dynamic_cast - return static_cast&>(value_); - } - }; -#endif - /* ************************************************************************* */ template const ValueType& Values::at(Key j) const { @@ -83,27 +58,6 @@ namespace gtsam { return static_cast(*item->second); } -#if 0 - /* ************************************************************************* */ - inline ValueAutomaticCasting Values::at(Key j) const { - // Find the item - KeyValueMap::const_iterator item = values_.find(j); - - // Throw exception if it does not exist - if(item == values_.end()) - throw ValuesKeyDoesNotExist("retrieve", j); - - return ValueAutomaticCasting(item->first, *item->second); - } -#endif - -#if 0 - /* ************************************************************************* */ - inline ValueAutomaticCasting Values::operator[](Key j) const { - return at(j); - } -#endif - /* ************************************************************************* */ template boost::optional Values::exists(Key j) const { diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index 5f6dad022..4af8e5a3e 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -84,6 +84,9 @@ namespace gtsam { /// A shared_ptr to this class typedef boost::shared_ptr shared_ptr; + /// A const shared_ptr to this class + typedef boost::shared_ptr const_shared_ptr; + /// A key-value pair, which you get by dereferencing iterators struct KeyValuePair { const Key key; ///< The key @@ -299,22 +302,6 @@ namespace gtsam { */ const Value& at(Key j) const; -#if 0 - /** Retrieve a variable by key \c j. This non-templated version returns a - * special ValueAutomaticCasting object that may be assigned to the proper - * value. - * @param j Retrieve the value associated with this key - * @return A ValueAutomaticCasting object that may be assignmed to a Value - * of the proper type. - */ - ValueAutomaticCasting at(Key j) const; -#endif - -#if 0 - /** operator[] syntax for at(Key j) */ - ValueAutomaticCasting operator[](Key j) const; -#endif - /** Check if a value exists with key \c j. See exists<>(Key j) * and exists(const TypedKey& j) for versions that return the value if it * exists. 
*/ @@ -390,6 +377,9 @@ namespace gtsam { /** Replace all keys and variables */ Values& operator=(const Values& rhs); + /** Swap the contents of two Values without copying data */ + void swap(Values& other) { values_.swap(other.values_); } + /** Remove all variables from the config */ void clear() { values_.clear(); } diff --git a/gtsam/slam/planarSLAM.cpp b/gtsam/slam/planarSLAM.cpp index db3d6b146..5e38acbd2 100644 --- a/gtsam/slam/planarSLAM.cpp +++ b/gtsam/slam/planarSLAM.cpp @@ -17,8 +17,6 @@ #include #include -#include -#include // Use planarSLAM namespace for specific SLAM instance namespace planarSLAM { diff --git a/gtsam/slam/planarSLAM.h b/gtsam/slam/planarSLAM.h index 33dc97a8c..06994d8bd 100644 --- a/gtsam/slam/planarSLAM.h +++ b/gtsam/slam/planarSLAM.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -110,15 +110,10 @@ namespace planarSLAM { /// Optimize Values optimize(const Values& initialEstimate) { - typedef NonlinearOptimizer Optimizer; - return *Optimizer::optimizeLM(*this, initialEstimate, - NonlinearOptimizationParameters::LAMBDA); + return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize(); } }; - /// Optimizer - typedef NonlinearOptimizer Optimizer; - } // planarSLAM diff --git a/gtsam/slam/pose2SLAM.cpp b/gtsam/slam/pose2SLAM.cpp index b006c61f2..a500ed22e 100644 --- a/gtsam/slam/pose2SLAM.cpp +++ b/gtsam/slam/pose2SLAM.cpp @@ -21,8 +21,6 @@ // Use pose2SLAM namespace for specific SLAM instance - template class gtsam::NonlinearOptimizer; - namespace pose2SLAM { /* ************************************************************************* */ diff --git a/gtsam/slam/pose2SLAM.h b/gtsam/slam/pose2SLAM.h index 121e0c062..42bd8758c 100644 --- a/gtsam/slam/pose2SLAM.h +++ b/gtsam/slam/pose2SLAM.h @@ -24,7 +24,7 @@ #include #include #include -#include +#include #include #include @@ -97,20 +97,10 @@ namespace pose2SLAM { /// Optimize Values optimize(const Values& initialEstimate) { - typedef NonlinearOptimizer Optimizer; - return *Optimizer::optimizeLM(*this, initialEstimate, - NonlinearOptimizationParameters::LAMBDA); + return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize(); } }; - /// The sequential optimizer - typedef NonlinearOptimizer OptimizerSequential; - - /// The multifrontal optimizer - typedef NonlinearOptimizer Optimizer; - } // pose2SLAM diff --git a/gtsam/slam/pose3SLAM.h b/gtsam/slam/pose3SLAM.h index eb4eb9573..c394d800b 100644 --- a/gtsam/slam/pose3SLAM.h +++ b/gtsam/slam/pose3SLAM.h @@ -61,9 +61,6 @@ namespace gtsam { void addHardConstraint(Index i, const Pose3& p); }; - /// Optimizer - typedef NonlinearOptimizer Optimizer; - } // pose3SLAM /** diff --git a/gtsam/slam/tests/testGeneralSFMFactor.cpp b/gtsam/slam/tests/testGeneralSFMFactor.cpp index 7a8dc4825..88f4dc65e 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor.cpp @@ -16,10 +16,12 @@ using namespace boost; #include #include #include -#include +#include #include #include #include +#include +#include using namespace std; using namespace gtsam; @@ -62,8 +64,6 @@ double getGaussian() return sqrt(-2.0f * (double)log(S) / S) * V1; } -typedef NonlinearOptimizer Optimizer; - const SharedNoiseModel sigma1(noiseModel::Unit::Create(1)); /* ************************************************************************* */ @@ -148,11 +148,11 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { vector X = genCameraDefaultCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; 
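+  // one measurement factor per (camera, landmark) pair, built from exact
+  // projections; noise is added to the initial estimate below instead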
for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -160,26 +160,24 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); // Create an ordering of the variables - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * 1e-5 * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -187,11 +185,11 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { vector L = genPoint3(); vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -199,9 +197,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { @@ -209,22 +207,20 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } else { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); const double reproj_error = 1e-5; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -235,40 +231,37 @@ TEST( GeneralSFMFactor, 
optimize_varK_FixCameras ) { // add measurement with noise const double noise = baseline*0.1; - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); //Point3 pt(L[i].x(), L[i].y(), L[i].z()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } for ( size_t i = 0 ; i < X.size() ; ++i ) - graph->addCameraConstraint(i, X[i]); + graph.addCameraConstraint(i, X[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -278,17 +271,17 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) { const double rot_noise = 1e-5, @@ -296,7 +289,7 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { focal_noise = 1, skew_noise = 1e-5; if ( i == 0 ) { - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; } else { @@ -307,29 +300,25 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { skew_noise, // s trans_noise, trans_noise // ux, uy ) ; - values->insert(Symbol('x',i), X[i].retract(delta)) ; + values.insert(Symbol('x',i), X[i].retract(delta)) ; } } for ( size_t i = 0 ; i < L.size() ; ++i ) { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } // fix X0 and all landmarks, allow only the X[1] to move - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); for ( size_t i = 0 ; i < L.size() ; ++i ) - graph->addPoint3Constraint(i, L[i]); + graph.addPoint3Constraint(i, L[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering 
= *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -338,11 +327,11 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -350,28 +339,79 @@ // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + // Fix the position of the system by constraining the first camera to the origin + graph.addCameraConstraint(0, X[0]); + + // Constrain the scale of the problem with a soft 2m range factor between the cameras + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 10.0))); + const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-2, 1e-2, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); +} - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); +/* ************************************************************************* */ +TEST(GeneralSFMFactor, GeneralCameraPoseRange) { + // Tests range factor between a GeneralCamera and a Pose3 + Graph graph; + graph.addCameraConstraint(0, GeneralCamera()); + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 1.0))); + graph.add(PriorFactor(Symbol('x',1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)), sharedSigma(6, 1.0))); + + Values init; + init.insert(Symbol('x',0), GeneralCamera()); + init.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.0,1.0,1.0))); + + // The optimal value between the 2m range factor and 1m prior is 1.5m + Values expected; + expected.insert(Symbol('x',0), GeneralCamera()); + expected.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.5,0.0,0.0))); + + LevenbergMarquardtParams params; + params.absoluteErrorTol = 1e-9; + params.relativeErrorTol = 1e-9; + Values actual = LevenbergMarquardtOptimizer(graph, init, params).optimize(); + + EXPECT(assert_equal(expected, actual, 1e-4)); +} + +/* ************************************************************************* */ +TEST(GeneralSFMFactor, CalibratedCameraPoseRange) { + // Tests range factor between a CalibratedCamera and a Pose3 + NonlinearFactorGraph graph; + graph.add(PriorFactor(Symbol('x',0), CalibratedCamera(), sharedSigma(6, 1.0))); + 
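// With unit-sigma priors pulling x0 toward 0 and x1 toward 1, and the 2.0 range measurement added next, the least-squares optimum minimizes a^2+(b-1)^2+(b-a-2)^2 over the two x-positions a and b, giving a = -1/3 and b = 4/3, the expected values checked below. +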
graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 1.0))); + graph.add(PriorFactor(Symbol('x',1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)), sharedSigma(6, 1.0))); + + Values init; + init.insert(Symbol('x',0), CalibratedCamera()); + init.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.0,1.0,1.0))); + + Values expected; + expected.insert(Symbol('x',0), CalibratedCamera(Pose3(Rot3(), Point3(-0.333333333333, 0, 0)))); + expected.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.333333333333, 0, 0))); + + LevenbergMarquardtParams params; + params.absoluteErrorTol = 1e-9; + params.relativeErrorTol = 1e-9; + Values actual = LevenbergMarquardtOptimizer(graph, init, params).optimize(); + + EXPECT(assert_equal(expected, actual, 1e-4)); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp index f762505a3..8a6c3b575 100644 --- a/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp +++ b/gtsam/slam/tests/testGeneralSFMFactor_Cal3Bundler.cpp @@ -16,10 +16,11 @@ using namespace boost; #include #include #include -#include +#include #include #include #include +#include using namespace std; using namespace gtsam; @@ -63,8 +64,6 @@ double getGaussian() return sqrt(-2.0f * (double)log(S) / S) * V1; } -typedef NonlinearOptimizer Optimizer; - const SharedNoiseModel sigma1(noiseModel::Unit::Create(1)); /* ************************************************************************* */ @@ -150,11 +149,11 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { vector X = genCameraDefaultCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -162,26 +161,24 @@ TEST( GeneralSFMFactor, optimize_defaultK ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); // Create an ordering of the variables - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * 1e-5 * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * 1e-5 * nMeasurements); } /* ************************************************************************* */ @@ -189,11 +186,11 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { vector L = genPoint3(); vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; 
++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -201,9 +198,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { @@ -211,22 +208,20 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } else { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } } - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); const double reproj_error = 1e-5; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -237,40 +232,37 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) { // add measurement with noise const double noise = baseline*0.1; - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); //Point3 pt(L[i].x(), L[i].y(), L[i].z()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } for ( size_t i = 0 ; i < X.size() ; ++i ) - graph->addCameraConstraint(i, X[i]); + graph.addCameraConstraint(i, X[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -280,23 +272,23 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) { vector X = genCameraVariableCalibration(); // add measurement with noise - 
shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } const size_t nMeasurements = L.size()*X.size(); - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) { const double rot_noise = 1e-5, trans_noise = 1e-3, focal_noise = 1, distort_noise = 1e-3; if ( i == 0 ) { - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; } else { @@ -305,28 +297,25 @@ trans_noise, trans_noise, trans_noise, // translation focal_noise, distort_noise, distort_noise // f, k1, k2 ) ; - values->insert(Symbol('x',i), X[i].retract(delta)) ; + values.insert(Symbol('x',i), X[i].retract(delta)) ; } } for ( size_t i = 0 ; i < L.size() ; ++i ) { - values->insert(Symbol('l',i), L[i]) ; + values.insert(Symbol('l',i), L[i]) ; } // fix X0 and all landmarks, allow only the X[1] to move - graph->addCameraConstraint(0, X[0]); + graph.addCameraConstraint(0, X[0]); for ( size_t i = 0 ; i < L.size() ; ++i ) - graph->addPoint3Constraint(i, L[i]); + graph.addPoint3Constraint(i, L[i]); const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ @@ -335,11 +324,11 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) { vector X = genCameraVariableCalibration(); // add measurement with noise - shared_ptr graph(new Graph()); + Graph graph; for ( size_t j = 0 ; j < X.size() ; ++j) { for ( size_t i = 0 ; i < L.size() ; ++i) { Point2 pt = X[j].project(L[i]) ; - graph->addMeasurement(j, i, pt, sigma1); + graph.addMeasurement(j, i, pt, sigma1); } } @@ -347,28 +336,30 @@ // add initial const double noise = baseline*0.1; - boost::shared_ptr values(new Values); + Values values; for ( size_t i = 0 ; i < X.size() ; ++i ) - values->insert(Symbol('x',i), X[i]) ; + values.insert(Symbol('x',i), X[i]) ; // add noise only to the first landmark for ( size_t i = 0 ; i < L.size() ; ++i ) { Point3 pt(L[i].x()+noise*getGaussian(), L[i].y()+noise*getGaussian(), L[i].z()+noise*getGaussian()); - values->insert(Symbol('l',i), pt) ; + values.insert(Symbol('l',i), pt) ; } - graph->addCameraConstraint(0, X[0]); + // Fix the position of the system by constraining the first camera to the origin + graph.addCameraConstraint(0, X[0]); + + // Constrain the scale of the problem with a soft 2m range factor between the cameras + graph.add(RangeFactor(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 10.0))); + const double reproj_error = 1e-5 ; - shared_ptr ordering = getOrdering(X,L); - NonlinearOptimizationParameters::shared_ptr params ( - new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT)); - Optimizer 
optimizer(graph, values, ordering, params); - - Optimizer optimizer2 = optimizer.levenbergMarquardt(); - EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements); + Ordering ordering = *getOrdering(X,L); + LevenbergMarquardtOptimizer optimizer(graph, values, ordering); + Values final = optimizer.optimize(); + EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testPose2SLAM.cpp b/gtsam/slam/tests/testPose2SLAM.cpp index 81ce20334..22c02ab52 100644 --- a/gtsam/slam/tests/testPose2SLAM.cpp +++ b/gtsam/slam/tests/testPose2SLAM.cpp @@ -148,29 +148,29 @@ TEST( Pose2SLAM, linearization ) TEST(Pose2Graph, optimize) { // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new pose2SLAM::Graph); - fg->addPoseConstraint(0, Pose2(0,0,0)); - fg->addOdometry(0, 1, Pose2(1,2,M_PI_2), covariance); + pose2SLAM::Graph fg; + fg.addPoseConstraint(0, Pose2(0,0,0)); + fg.addOdometry(0, 1, Pose2(1,2,M_PI_2), covariance); // Create initial config - boost::shared_ptr initial(new Values()); - initial->insert(pose2SLAM::PoseKey(0), Pose2(0,0,0)); - initial->insert(pose2SLAM::PoseKey(1), Pose2(0,0,0)); + Values initial; + initial.insert(pose2SLAM::PoseKey(0), Pose2(0,0,0)); + initial.insert(pose2SLAM::PoseKey(1), Pose2(0,0,0)); // Choose an ordering and optimize - shared_ptr ordering(new Ordering); - *ordering += kx0, kx1; - typedef NonlinearOptimizer Optimizer; + Ordering ordering; + ordering += kx0, kx1; - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - Optimizer optimizer0(fg, initial, ordering, params); - Optimizer optimizer = optimizer0.levenbergMarquardt(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + params.ordering = ordering; + Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize(); // Check with expected config Values expected; expected.insert(pose2SLAM::PoseKey(0), Pose2(0,0,0)); expected.insert(pose2SLAM::PoseKey(1), Pose2(1,2,M_PI_2)); - CHECK(assert_equal(expected, *optimizer.values())); + CHECK(assert_equal(expected, actual)); } /* ************************************************************************* */ @@ -182,29 +182,28 @@ TEST(Pose2Graph, optimizeThreePoses) { Pose2 p0 = hexagon.pose(0), p1 = hexagon.pose(1); // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new pose2SLAM::Graph); - fg->addPoseConstraint(0, p0); + pose2SLAM::Graph fg; + fg.addPoseConstraint(0, p0); Pose2 delta = p0.between(p1); - fg->addOdometry(0, 1, delta, covariance); - fg->addOdometry(1, 2, delta, covariance); - fg->addOdometry(2, 0, delta, covariance); + fg.addOdometry(0, 1, delta, covariance); + fg.addOdometry(1, 2, delta, covariance); + fg.addOdometry(2, 0, delta, covariance); // Create initial config - boost::shared_ptr initial(new pose2SLAM::Values()); - initial->insertPose(0, p0); - initial->insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); - initial->insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); + pose2SLAM::Values initial; + initial.insertPose(0, p0); + initial.insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); + initial.insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); // Choose an ordering - shared_ptr ordering(new Ordering); - *ordering += kx0,kx1,kx2; + Ordering ordering; + ordering += kx0,kx1,kx2; // optimize - 
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - pose2SLAM::Optimizer optimizer0(fg, initial, ordering, params); - pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(); - - Values actual = *optimizer.values(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + params.ordering = ordering; + Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize(); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); @@ -219,35 +218,34 @@ TEST_UNSAFE(Pose2SLAM, optimizeCircle) { Pose2 p0 = hexagon.pose(0), p1 = hexagon.pose(1); // create a Pose graph with one equality constraint and one measurement - shared_ptr fg(new pose2SLAM::Graph); - fg->addPoseConstraint(0, p0); + pose2SLAM::Graph fg; + fg.addPoseConstraint(0, p0); Pose2 delta = p0.between(p1); - fg->addOdometry(0, 1, delta, covariance); - fg->addOdometry(1,2, delta, covariance); - fg->addOdometry(2,3, delta, covariance); - fg->addOdometry(3,4, delta, covariance); - fg->addOdometry(4,5, delta, covariance); - fg->addOdometry(5, 0, delta, covariance); + fg.addOdometry(0, 1, delta, covariance); + fg.addOdometry(1,2, delta, covariance); + fg.addOdometry(2,3, delta, covariance); + fg.addOdometry(3,4, delta, covariance); + fg.addOdometry(4,5, delta, covariance); + fg.addOdometry(5, 0, delta, covariance); // Create initial config - boost::shared_ptr initial(new pose2SLAM::Values()); - initial->insertPose(0, p0); - initial->insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); - initial->insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); - initial->insertPose(3, hexagon.pose(3).retract(Vector_(3,-0.1, 0.1,-0.1))); - initial->insertPose(4, hexagon.pose(4).retract(Vector_(3, 0.1,-0.1, 0.1))); - initial->insertPose(5, hexagon.pose(5).retract(Vector_(3,-0.1, 0.1,-0.1))); + pose2SLAM::Values initial; + initial.insertPose(0, p0); + initial.insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1))); + initial.insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1))); + initial.insertPose(3, hexagon.pose(3).retract(Vector_(3,-0.1, 0.1,-0.1))); + initial.insertPose(4, hexagon.pose(4).retract(Vector_(3, 0.1,-0.1, 0.1))); + initial.insertPose(5, hexagon.pose(5).retract(Vector_(3,-0.1, 0.1,-0.1))); // Choose an ordering - shared_ptr ordering(new Ordering); - *ordering += kx0,kx1,kx2,kx3,kx4,kx5; + Ordering ordering; + ordering += kx0,kx1,kx2,kx3,kx4,kx5; // optimize - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - pose2SLAM::Optimizer optimizer0(fg, initial, ordering, params); - pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(); - - Values actual = *optimizer.values(); + LevenbergMarquardtParams params; + params.relativeErrorTol = 1e-15; + params.ordering = ordering; + Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize(); // Check with ground truth CHECK(assert_equal((const Values&)hexagon, actual)); diff --git a/gtsam/slam/tests/testPose3SLAM.cpp b/gtsam/slam/tests/testPose3SLAM.cpp index 2edf6c4f0..688dcc7b9 100644 --- a/gtsam/slam/tests/testPose3SLAM.cpp +++ b/gtsam/slam/tests/testPose3SLAM.cpp @@ -30,6 +30,7 @@ using namespace boost::assign; #include #include +#include using namespace std; using namespace gtsam; @@ -52,35 +53,32 @@ TEST(Pose3Graph, optimizeCircle) { Pose3 gT0 = hexagon.at(PoseKey(0)), gT1 = hexagon.at(PoseKey(1)); // create a Pose graph with 
one equality constraint and one measurement - shared_ptr fg(new Pose3Graph); - fg->addHardConstraint(0, gT0); + Pose3Graph fg; + fg.addHardConstraint(0, gT0); Pose3 _0T1 = gT0.between(gT1); // inv(gT0)*gT1 double theta = M_PI/3.0; CHECK(assert_equal(Pose3(Rot3::yaw(-theta),Point3(radius*sin(theta),-radius*cos(theta),0)),_0T1)); - fg->addConstraint(0,1, _0T1, covariance); - fg->addConstraint(1,2, _0T1, covariance); - fg->addConstraint(2,3, _0T1, covariance); - fg->addConstraint(3,4, _0T1, covariance); - fg->addConstraint(4,5, _0T1, covariance); - fg->addConstraint(5,0, _0T1, covariance); + fg.addConstraint(0,1, _0T1, covariance); + fg.addConstraint(1,2, _0T1, covariance); + fg.addConstraint(2,3, _0T1, covariance); + fg.addConstraint(3,4, _0T1, covariance); + fg.addConstraint(4,5, _0T1, covariance); + fg.addConstraint(5,0, _0T1, covariance); // Create initial config - boost::shared_ptr initial(new Values()); - initial->insert(PoseKey(0), gT0); - initial->insert(PoseKey(1), hexagon.at(PoseKey(1)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); - initial->insert(PoseKey(2), hexagon.at(PoseKey(2)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); - initial->insert(PoseKey(3), hexagon.at(PoseKey(3)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); - initial->insert(PoseKey(4), hexagon.at(PoseKey(4)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); - initial->insert(PoseKey(5), hexagon.at(PoseKey(5)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); + Values initial; + initial.insert(PoseKey(0), gT0); + initial.insert(PoseKey(1), hexagon.at(PoseKey(1)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); + initial.insert(PoseKey(2), hexagon.at(PoseKey(2)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); + initial.insert(PoseKey(3), hexagon.at(PoseKey(3)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); + initial.insert(PoseKey(4), hexagon.at(PoseKey(4)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); + initial.insert(PoseKey(5), hexagon.at(PoseKey(5)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); // Choose an ordering and optimize - shared_ptr ordering(new Ordering); - *ordering += kx0,kx1,kx2,kx3,kx4,kx5; - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15); - pose3SLAM::Optimizer optimizer0(fg, initial, ordering, params); - pose3SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(); + Ordering ordering; + ordering += kx0,kx1,kx2,kx3,kx4,kx5; - Values actual = *optimizer.values(); + Values actual = LevenbergMarquardtOptimizer(fg, initial, ordering).optimize(); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); @@ -115,7 +113,7 @@ TEST(Pose3Graph, partial_prior_height) { // linearization EXPECT_DOUBLES_EQUAL(2.0, height.error(values), tol); - Values actual = *pose3SLAM::Optimizer::optimizeLM(graph, values); + Values actual = LevenbergMarquardtOptimizer(graph, values).optimize(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } @@ -167,7 +165,7 @@ TEST(Pose3Graph, partial_prior_xy) { Values values; values.insert(key, init); - Values actual = *pose3SLAM::Optimizer::optimizeLM(graph, values); + Values actual = LevenbergMarquardtOptimizer(graph, values).optimize(); EXPECT(assert_equal(expected, actual.at(key), tol)); EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol); } diff --git a/gtsam/slam/tests/testStereoFactor.cpp b/gtsam/slam/tests/testStereoFactor.cpp index fc026fb35..25cd6e16f 100644 --- 
a/gtsam/slam/tests/testStereoFactor.cpp +++ b/gtsam/slam/tests/testStereoFactor.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -50,45 +50,34 @@ TEST( StereoFactor, singlePoint) { //Cal3_S2 K(625, 625, 0, 320, 240, 0.5); boost::shared_ptr K(new Cal3_S2Stereo(625, 625, 0, 320, 240, 0.5)); - boost::shared_ptr graph(new visualSLAM::Graph()); + NonlinearFactorGraph graph; - graph->add(visualSLAM::PoseConstraint(PoseKey(1),camera1)); + graph.add(visualSLAM::PoseConstraint(PoseKey(1),camera1)); StereoPoint2 z14(320,320.0-50, 240); // arguments: measurement, sigma, cam#, measurement #, K, baseline (m) - graph->add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K)); + graph.add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K)); // Create a configuration corresponding to the ground truth - boost::shared_ptr values(new Values()); - values->insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin + Values values; + values.insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin Point3 l1(0, 0, 0); - values->insert(PointKey(1), l1); // add point at origin; + values.insert(PointKey(1), l1); // add point at origin; - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values); - - typedef gtsam::NonlinearOptimizer Optimizer; // optimization engine for this domain - - double absoluteThreshold = 1e-9; - double relativeThreshold = 1e-5; - int maxIterations = 100; - NonlinearOptimizationParameters::verbosityLevel verbose = NonlinearOptimizationParameters::SILENT; - NonlinearOptimizationParameters parameters(absoluteThreshold, relativeThreshold, 0, - maxIterations, 1.0, 10, verbose, NonlinearOptimizationParameters::BOUNDED); - - Optimizer optimizer(graph, values, ordering, make_shared(parameters)); + GaussNewtonOptimizer optimizer(graph, values); // We expect the initial to be zero because config is the ground truth DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed - Optimizer afterOneIteration = optimizer.iterate(); - DOUBLES_EQUAL(0.0, afterOneIteration.error(), 1e-9); + optimizer.iterate(); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Complete solution - Optimizer final = optimizer.gaussNewton(); + optimizer.optimize(); - DOUBLES_EQUAL(0.0, final.error(), 1e-6); + DOUBLES_EQUAL(0.0, optimizer.error(), 1e-6); } /* ************************************************************************* */ diff --git a/gtsam/slam/tests/testVSLAM.cpp b/gtsam/slam/tests/testVSLAM.cpp index 0188daa96..bb7623954 100644 --- a/gtsam/slam/tests/testVSLAM.cpp +++ b/gtsam/slam/tests/testVSLAM.cpp @@ -23,7 +23,7 @@ using namespace boost; #include -#include +#include #include using namespace std; @@ -81,37 +81,37 @@ visualSLAM::Graph testGraph() { TEST( Graph, optimizeLM) { // build a graph - shared_ptr graph(new visualSLAM::Graph(testGraph())); + visualSLAM::Graph graph(testGraph()); // add 3 landmark constraints - graph->addPointConstraint(1, landmark1); - graph->addPointConstraint(2, landmark2); - graph->addPointConstraint(3, landmark3); + graph.addPointConstraint(1, landmark1); + graph.addPointConstraint(2, landmark2); + graph.addPointConstraint(3, landmark3); // Create an initial values structure corresponding to the ground truth - boost::shared_ptr initialEstimate(new Values); - initialEstimate->insert(PoseKey(1), camera1); - initialEstimate->insert(PoseKey(2), camera2); - initialEstimate->insert(PointKey(1), landmark1); - initialEstimate->insert(PointKey(2), 
landmark2); - initialEstimate->insert(PointKey(3), landmark3); - initialEstimate->insert(PointKey(4), landmark4); + Values initialEstimate; + initialEstimate.insert(PoseKey(1), camera1); + initialEstimate.insert(PoseKey(2), camera2); + initialEstimate.insert(PointKey(1), landmark1); + initialEstimate.insert(PointKey(2), landmark2); + initialEstimate.insert(PointKey(3), landmark3); + initialEstimate.insert(PointKey(4), landmark4); // Create an ordering of the variables - shared_ptr ordering(new Ordering); - *ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); + Ordering ordering; + ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering); + LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, ordering); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - visualSLAM::Optimizer afterOneIteration = optimizer.iterate(); + optimizer.iterate(); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values()))); + CHECK(assert_equal(initialEstimate, optimizer.values())); } @@ -119,36 +119,36 @@ TEST( Graph, optimizeLM) TEST( Graph, optimizeLM2) { // build a graph - shared_ptr graph(new visualSLAM::Graph(testGraph())); + visualSLAM::Graph graph(testGraph()); // add 2 camera constraints - graph->addPoseConstraint(1, camera1); - graph->addPoseConstraint(2, camera2); + graph.addPoseConstraint(1, camera1); + graph.addPoseConstraint(2, camera2); // Create an initial values structure corresponding to the ground truth - boost::shared_ptr initialEstimate(new Values); - initialEstimate->insert(PoseKey(1), camera1); - initialEstimate->insert(PoseKey(2), camera2); - initialEstimate->insert(PointKey(1), landmark1); - initialEstimate->insert(PointKey(2), landmark2); - initialEstimate->insert(PointKey(3), landmark3); - initialEstimate->insert(PointKey(4), landmark4); + Values initialEstimate; + initialEstimate.insert(PoseKey(1), camera1); + initialEstimate.insert(PoseKey(2), camera2); + initialEstimate.insert(PointKey(1), landmark1); + initialEstimate.insert(PointKey(2), landmark2); + initialEstimate.insert(PointKey(3), landmark3); + initialEstimate.insert(PointKey(4), landmark4); // Create an ordering of the variables - shared_ptr ordering(new Ordering); - *ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); + Ordering ordering; + ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2); // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering); + LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, ordering); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - visualSLAM::Optimizer afterOneIteration = optimizer.iterate(); + optimizer.iterate(); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values()))); + CHECK(assert_equal(initialEstimate, optimizer.values())); } @@ -156,34 +156,32 @@ TEST( Graph, optimizeLM2) TEST( Graph, CHECK_ORDERING) { // build a 
graph - shared_ptr graph(new visualSLAM::Graph(testGraph())); + visualSLAM::Graph graph = testGraph(); // add 2 camera constraints - graph->addPoseConstraint(1, camera1); - graph->addPoseConstraint(2, camera2); + graph.addPoseConstraint(1, camera1); + graph.addPoseConstraint(2, camera2); // Create an initial values structure corresponding to the ground truth - boost::shared_ptr initialEstimate(new Values); - initialEstimate->insert(PoseKey(1), camera1); - initialEstimate->insert(PoseKey(2), camera2); - initialEstimate->insert(PointKey(1), landmark1); - initialEstimate->insert(PointKey(2), landmark2); - initialEstimate->insert(PointKey(3), landmark3); - initialEstimate->insert(PointKey(4), landmark4); - - Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initialEstimate); + Values initialEstimate; + initialEstimate.insert(PoseKey(1), camera1); + initialEstimate.insert(PoseKey(2), camera2); + initialEstimate.insert(PointKey(1), landmark1); + initialEstimate.insert(PointKey(2), landmark2); + initialEstimate.insert(PointKey(3), landmark3); + initialEstimate.insert(PointKey(4), landmark4); // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering); + LevenbergMarquardtOptimizer optimizer(graph, initialEstimate); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started // with the ground truth - visualSLAM::Optimizer afterOneIteration = optimizer.iterate(); + optimizer.iterate(); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // check if correct - CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values()))); + CHECK(assert_equal(initialEstimate, optimizer.values())); } /* ************************************************************************* */ diff --git a/gtsam/slam/visualSLAM.cpp b/gtsam/slam/visualSLAM.cpp index 67e77dc15..f6512e843 100644 --- a/gtsam/slam/visualSLAM.cpp +++ b/gtsam/slam/visualSLAM.cpp @@ -16,7 +16,6 @@ */ #include -#include #include namespace gtsam { diff --git a/gtsam/slam/visualSLAM.h b/gtsam/slam/visualSLAM.h index 5416e2142..d9be45eb3 100644 --- a/gtsam/slam/visualSLAM.h +++ b/gtsam/slam/visualSLAM.h @@ -145,7 +145,4 @@ namespace visualSLAM { }; // Graph - /// typedef for Optimizer. 
The current default will use the multi-frontal solver - typedef NonlinearOptimizer Optimizer; - } // namespaces diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index 09baec6e1..e1b685fc8 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include namespace iq2D = simulated2D::inequality_constraints; using namespace std; @@ -31,11 +31,6 @@ SharedDiagonal soft_model2 = noiseModel::Unit::Create(2); SharedDiagonal soft_model2_alt = noiseModel::Isotropic::Sigma(2, 0.1); SharedDiagonal hard_model1 = noiseModel::Constrained::All(1); -typedef NonlinearFactorGraph Graph; -typedef boost::shared_ptr shared_graph; -typedef boost::shared_ptr shared_values; -typedef NonlinearOptimizer Optimizer; - // some simple inequality constraints Symbol key(simulated2D::PoseKey(1)); double mu = 10.0; @@ -150,19 +145,19 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { Point2 goal_pt(1.0, 2.0); Point2 start_pt(0.0, 1.0); - shared_graph graph(new Graph()); + NonlinearFactorGraph graph; Symbol x1('x',1); - graph->add(iq2D::PoseXInequality(x1, 1.0, true)); - graph->add(iq2D::PoseYInequality(x1, 2.0, true)); - graph->add(simulated2D::Prior(start_pt, soft_model2, x1)); + graph.add(iq2D::PoseXInequality(x1, 1.0, true)); + graph.add(iq2D::PoseYInequality(x1, 2.0, true)); + graph.add(simulated2D::Prior(start_pt, soft_model2, x1)); - shared_values initValues(new Values()); - initValues->insert(x1, start_pt); + Values initValues; + initValues.insert(x1, start_pt); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); Values expected; expected.insert(x1, goal_pt); - CHECK(assert_equal(expected, *actual, tol)); + CHECK(assert_equal(expected, actual, tol)); } /* ************************************************************************* */ @@ -172,19 +167,19 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { Point2 goal_pt(1.0, 2.0); Point2 start_pt(2.0, 3.0); - shared_graph graph(new Graph()); + NonlinearFactorGraph graph; Symbol x1('x',1); - graph->add(iq2D::PoseXInequality(x1, 1.0, false)); - graph->add(iq2D::PoseYInequality(x1, 2.0, false)); - graph->add(simulated2D::Prior(start_pt, soft_model2, x1)); + graph.add(iq2D::PoseXInequality(x1, 1.0, false)); + graph.add(iq2D::PoseYInequality(x1, 2.0, false)); + graph.add(simulated2D::Prior(start_pt, soft_model2, x1)); - shared_values initValues(new Values()); - initValues->insert(x1, start_pt); + Values initValues; + initValues.insert(x1, start_pt); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); Values expected; expected.insert(x1, goal_pt); - CHECK(assert_equal(expected, *actual, tol)); + CHECK(assert_equal(expected, actual, tol)); } /* ************************************************************************* */ @@ -233,7 +228,7 @@ TEST( testBoundingConstraint, MaxDistance_simple_optimization) { Point2 pt1, pt2_init(5.0, 0.0), pt2_goal(2.0, 0.0); Symbol x1('x',1), x2('x',2); - Graph graph; + NonlinearFactorGraph graph; graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(pt1, x1)); graph.add(simulated2D::Prior(pt2_init, soft_model2_alt, x2)); graph.add(iq2D::PoseMaxDistConstraint(x1, x2, 2.0)); @@ -259,7 +254,7 @@ TEST( testBoundingConstraint, avoid_demo) { Point2 x1_pt, x2_init(2.0, 0.5), x2_goal(2.0, 1.0), x3_pt(4.0, 0.0), 
l1_pt(2.0, 0.0); Point2 odo(2.0, 0.0); - Graph graph; + NonlinearFactorGraph graph; graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(x1_pt, x1)); graph.add(simulated2D::Odometry(odo, soft_model2_alt, x1, x2)); graph.add(iq2D::LandmarkAvoid(x2, l1, radius)); diff --git a/tests/testInferenceB.cpp b/tests/testInferenceB.cpp index a4aa37b10..6ad070698 100644 --- a/tests/testInferenceB.cpp +++ b/tests/testInferenceB.cpp @@ -18,6 +18,7 @@ #include #include +#include #include #include diff --git a/tests/testMarginals.cpp b/tests/testMarginals.cpp new file mode 100644 index 000000000..a91e6b576 --- /dev/null +++ b/tests/testMarginals.cpp @@ -0,0 +1,187 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testMarginals.cpp + * @brief + * @author Richard Roberts + * @date May 14, 2012 + */ + +#include + +// for all nonlinear keys +#include + +// for points and poses +#include +#include + +// for modeling measurement uncertainty - all models included here +#include + +// add in headers for specific factors +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +TEST(Marginals, planarSLAMmarginals) { + + // Taken from PlanarSLAMSelfContained_advanced + + // create keys for variables + Symbol x1('x',1), x2('x',2), x3('x',3); + Symbol l1('l',1), l2('l',2); + + // create graph container and add factors to it + NonlinearFactorGraph graph; + + /* add prior */ + // gaussian for prior + SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); + Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin + graph.add(PriorFactor(x1, prior_measurement, prior_model)); // add the factor to the graph + + /* add odometry */ + // general noisemodel for odometry + SharedDiagonal odom_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1)); + Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) + // create between factors to represent odometry + graph.add(BetweenFactor(x1, x2, odom_measurement, odom_model)); + graph.add(BetweenFactor(x2, x3, odom_measurement, odom_model)); + + /* add measurements */ + // general noisemodel for measurements + SharedDiagonal meas_model = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.2)); + + // create the measurement values - indices are (pose id, landmark id) + Rot2 bearing11 = Rot2::fromDegrees(45), + bearing21 = Rot2::fromDegrees(90), + bearing32 = Rot2::fromDegrees(90); + double range11 = sqrt(4+4), + range21 = 2.0, + range32 = 2.0; + + // create bearing/range factors + graph.add(BearingRangeFactor(x1, l1, bearing11, range11, meas_model)); + graph.add(BearingRangeFactor(x2, l1, bearing21, range21, meas_model)); + graph.add(BearingRangeFactor(x3, l2, bearing32, range32, meas_model)); + + // linearization point for marginals + Values soln; + soln.insert(x1, Pose2(0.0, 0.0, 0.0)); + soln.insert(x2, Pose2(2.0, 0.0, 0.0)); + soln.insert(x3, Pose2(4.0, 0.0, 0.0)); + soln.insert(l1, Point2(2.0, 2.0)); + soln.insert(l2, Point2(4.0, 2.0)); + + Matrix expectedx1(3,3); + expectedx1 << + 0.09, -7.1942452e-18, -1.27897692e-17, + -7.1942452e-18, 0.09, 1.27897692e-17, + -1.27897692e-17, 
1.27897692e-17, 0.01; + Matrix expectedx2(3,3); + expectedx2 << + 0.120967742, -0.00129032258, 0.00451612903, + -0.00129032258, 0.158387097, 0.0206451613, + 0.00451612903, 0.0206451613, 0.0177419355; + Matrix expectedx3(3,3); + expectedx3 << + 0.160967742, 0.00774193548, 0.00451612903, + 0.00774193548, 0.351935484, 0.0561290323, + 0.00451612903, 0.0561290323, 0.0277419355; + Matrix expectedl1(2,2); + expectedl1 << + 0.168709677, -0.0477419355, + -0.0477419355, 0.163548387; + Matrix expectedl2(2,2); + expectedl2 << + 0.293870968, -0.104516129, + -0.104516129, 0.391935484; + + // Check marginals covariances for all variables (Cholesky mode) + Marginals marginals(graph, soln, Marginals::CHOLESKY); + EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8)); + EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8)); + EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8)); + EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8)); + EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8)); + + // Check marginals covariances for all variables (QR mode) + marginals = Marginals(graph, soln, Marginals::QR); + EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8)); + EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8)); + EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8)); + EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8)); + EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8)); + + // Check joint marginals for 3 variables + Matrix expected_l2x1x3(8,8); + expected_l2x1x3 << + 0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, 0.151935669757191, -0.104516127560770, -0.050967744878460, + -0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193, + 0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.090000180000270, 0.000000000000000, 0.000000000000000, + -0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, -0.000000000000000, 0.090000180000270, 0.000000000000000, + -0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.000000000000000, 0.040000000000000, 0.010000000000000, + 0.151935669757191, 0.007741936219615, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.160967924878730, 0.007741936219615, 0.004516127560770, + -0.104516127560770, 0.351935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193, + -0.050967744878460, 0.056129031890193, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.004516127560770, 0.056129031890193, 0.027741936219615; + vector variables(3); + variables[0] = l2; + variables[1] = x1; + variables[2] = x3; + JointMarginal joint_l2x1x3 = marginals.jointMarginalCovariance(variables); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,0,2,2)), Matrix(joint_l2x1x3(l2,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,0,3,2)), Matrix(joint_l2x1x3(x1,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,0,3,2)), Matrix(joint_l2x1x3(x3,l2)), 1e-6)); + + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,2,2,3)), Matrix(joint_l2x1x3(l2,x1)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,2,3,3)), 
Matrix(joint_l2x1x3(x1,x1)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,2,3,3)), Matrix(joint_l2x1x3(x3,x1)), 1e-6)); + + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,5,2,3)), Matrix(joint_l2x1x3(l2,x3)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,5,3,3)), Matrix(joint_l2x1x3(x1,x3)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,5,3,3)), Matrix(joint_l2x1x3(x3,x3)), 1e-6)); + + // Check joint marginals for 2 variables (different code path than >2 variable case above) + Matrix expected_l2x1(5,5); + expected_l2x1 << + 0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, + -0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, + 0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, + -0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, + -0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000; + variables.resize(2); + variables[0] = l2; + variables[1] = x1; + JointMarginal joint_l2x1 = marginals.jointMarginalCovariance(variables); + EXPECT(assert_equal(Matrix(expected_l2x1.block(0,0,2,2)), Matrix(joint_l2x1(l2,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1.block(2,0,3,2)), Matrix(joint_l2x1(x1,l2)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1.block(0,2,2,3)), Matrix(joint_l2x1(l2,x1)), 1e-6)); + EXPECT(assert_equal(Matrix(expected_l2x1.block(2,2,3,3)), Matrix(joint_l2x1(x1,x1)), 1e-6)); + + // Check joint marginal for 1 variable (different code path than >1 variable cases above) + variables.resize(1); + variables[0] = x1; + JointMarginal joint_x1 = marginals.jointMarginalCovariance(variables); + EXPECT(assert_equal(expectedx1, Matrix(joint_x1(x1,x1)), 1e-6)); +} + + + +/* ************************************************************************* */ +int main() { TestResult tr; return TestRegistry::runAllTests(tr);} +/* ************************************************************************* */ diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index 2ceb066e2..94ab58831 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include using namespace std; using namespace gtsam; @@ -35,9 +35,6 @@ typedef PriorFactor PosePrior; typedef NonlinearEquality PoseNLE; typedef boost::shared_ptr shared_poseNLE; -typedef NonlinearFactorGraph PoseGraph; -typedef NonlinearOptimizer PoseOptimizer; - Symbol key('x',1); /* ************************************************************************* */ @@ -188,25 +185,23 @@ TEST ( NonlinearEquality, allow_error_optimize ) { PoseNLE nle(key1, feasible1, error_gain); // add to a graph - boost::shared_ptr graph(new PoseGraph()); - graph->add(nle); + NonlinearFactorGraph graph; + graph.add(nle); // initialize away from the ideal Pose2 initPose(0.0, 2.0, 3.0); - boost::shared_ptr init(new Values()); - init->insert(key1, initPose); + Values init; + init.insert(key1, initPose); // optimize - boost::shared_ptr ord(new Ordering()); - ord->push_back(key1); - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-5, 1e-5); - PoseOptimizer optimizer(graph, init, ord, params); - PoseOptimizer result = optimizer.levenbergMarquardt(); + Ordering ordering; + ordering.push_back(key1); + Values result = 
LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); // verify Values expected; expected.insert(key1, feasible1); - EXPECT(assert_equal(expected, *result.values())); + EXPECT(assert_equal(expected, result)); } /* ************************************************************************* */ @@ -217,9 +212,9 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { Pose2 feasible1(1.0, 2.0, 3.0); // initialize away from the ideal - boost::shared_ptr init(new Values()); + Values init; Pose2 initPose(0.0, 2.0, 3.0); - init->insert(key1, initPose); + init.insert(key1, initPose); double error_gain = 500.0; PoseNLE nle(key1, feasible1, error_gain); @@ -228,32 +223,25 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { PosePrior prior(key1, initPose, noiseModel::Isotropic::Sigma(3, 0.1)); // add to a graph - boost::shared_ptr graph(new PoseGraph()); - graph->add(nle); - graph->add(prior); + NonlinearFactorGraph graph; + graph.add(nle); + graph.add(prior); // optimize - boost::shared_ptr ord(new Ordering()); - ord->push_back(key1); - NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-5, 1e-5); - PoseOptimizer optimizer(graph, init, ord, params); - PoseOptimizer result = optimizer.levenbergMarquardt(); + Ordering ordering; + ordering.push_back(key1); + Values actual = LevenbergMarquardtOptimizer(graph, init, ordering).optimize(); // verify Values expected; expected.insert(key1, feasible1); - EXPECT(assert_equal(expected, *result.values())); + EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ SharedDiagonal hard_model = noiseModel::Constrained::All(2); SharedDiagonal soft_model = noiseModel::Isotropic::Sigma(2, 1.0); -typedef NonlinearFactorGraph Graph; -typedef boost::shared_ptr shared_graph; -typedef boost::shared_ptr shared_values; -typedef NonlinearOptimizer Optimizer; - /* ************************************************************************* */ TEST( testNonlinearEqualityConstraint, unary_basics ) { Point2 pt(1.0, 2.0); @@ -314,23 +302,23 @@ TEST( testNonlinearEqualityConstraint, unary_simple_optimization ) { simulated2D::Prior::shared_ptr factor( new simulated2D::Prior(badPt, soft_model, key)); - shared_graph graph(new Graph()); - graph->push_back(constraint); - graph->push_back(factor); + NonlinearFactorGraph graph; + graph.push_back(constraint); + graph.push_back(factor); - shared_values initValues(new Values()); - initValues->insert(key, badPt); + Values initValues; + initValues.insert(key, badPt); // verify error values - EXPECT(constraint->active(*initValues)); + EXPECT(constraint->active(initValues)); Values expected; expected.insert(key, truth_pt); EXPECT(constraint->active(expected)); EXPECT_DOUBLES_EQUAL(0.0, constraint->error(expected), tol); - Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); - EXPECT(assert_equal(expected, *actual, tol)); + Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize(); + EXPECT(assert_equal(expected, actual, tol)); } /* ************************************************************************* */ @@ -411,20 +399,20 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) { new eq2D::OdoEqualityConstraint( truth_pt1.between(truth_pt2), key1, key2)); - shared_graph graph(new Graph()); - graph->push_back(constraint1); - graph->push_back(constraint2); - graph->push_back(factor); + NonlinearFactorGraph graph; + graph.push_back(constraint1); + 
+  graph.push_back(constraint2);
+  graph.push_back(factor);

-  shared_values initValues(new Values());
-  initValues->insert(key1, Point2());
-  initValues->insert(key2, badPt);
+  Values initValues;
+  initValues.insert(key1, Point2());
+  initValues.insert(key2, badPt);

-  Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues);
+  Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();
   Values expected;
   expected.insert(key1, truth_pt1);
   expected.insert(key2, truth_pt2);
-  CHECK(assert_equal(expected, *actual, tol));
+  CHECK(assert_equal(expected, actual, tol));
 }

 /* ********************************************************************* */
@@ -435,44 +423,44 @@ TEST (testNonlinearEqualityConstraint, two_pose ) {
  * constrained to a particular value */

-  shared_graph graph(new Graph());
+  NonlinearFactorGraph graph;

   Symbol x1('x',1), x2('x',2);
   Symbol l1('l',1), l2('l',2);

   Point2 pt_x1(1.0, 1.0), pt_x2(5.0, 6.0);
-  graph->add(eq2D::UnaryEqualityConstraint(pt_x1, x1));
-  graph->add(eq2D::UnaryEqualityConstraint(pt_x2, x2));
+  graph.add(eq2D::UnaryEqualityConstraint(pt_x1, x1));
+  graph.add(eq2D::UnaryEqualityConstraint(pt_x2, x2));

   Point2 z1(0.0, 5.0);
   SharedNoiseModel sigma(noiseModel::Isotropic::Sigma(2, 0.1));
-  graph->add(simulated2D::Measurement(z1, sigma, x1,l1));
+  graph.add(simulated2D::Measurement(z1, sigma, x1,l1));

   Point2 z2(-4.0, 0.0);
-  graph->add(simulated2D::Measurement(z2, sigma, x2,l2));
+  graph.add(simulated2D::Measurement(z2, sigma, x2,l2));

-  graph->add(eq2D::PointEqualityConstraint(l1, l2));
+  graph.add(eq2D::PointEqualityConstraint(l1, l2));

-  shared_values initialEstimate(new Values());
-  initialEstimate->insert(x1, pt_x1);
-  initialEstimate->insert(x2, Point2());
-  initialEstimate->insert(l1, Point2(1.0, 6.0)); // ground truth
-  initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame
+  Values initialEstimate;
+  initialEstimate.insert(x1, pt_x1);
+  initialEstimate.insert(x2, Point2());
+  initialEstimate.insert(l1, Point2(1.0, 6.0)); // ground truth
+  initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame

-  Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate);
+  Values actual = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();

   Values expected;
   expected.insert(x1, pt_x1);
   expected.insert(l1, Point2(1.0, 6.0));
   expected.insert(l2, Point2(1.0, 6.0));
   expected.insert(x2, Point2(5.0, 6.0));
-  CHECK(assert_equal(expected, *actual, 1e-5));
+  CHECK(assert_equal(expected, actual, 1e-5));
 }

 /* ********************************************************************* */
 TEST (testNonlinearEqualityConstraint, map_warp ) {
   // get a graph
-  shared_graph graph(new Graph());
+  NonlinearFactorGraph graph;

   // keys
   Symbol x1('x',1), x2('x',2);
@@ -480,37 +468,37 @@

   // constant constraint on x1
   Point2 pose1(1.0, 1.0);
-  graph->add(eq2D::UnaryEqualityConstraint(pose1, x1));
+  graph.add(eq2D::UnaryEqualityConstraint(pose1, x1));

   SharedDiagonal sigma = noiseModel::Isotropic::Sigma(1,0.1);

   // measurement from x1 to l1
   Point2 z1(0.0, 5.0);
-  graph->add(simulated2D::Measurement(z1, sigma, x1, l1));
+  graph.add(simulated2D::Measurement(z1, sigma, x1, l1));

   // measurement from x2 to l2
   Point2 z2(-4.0, 0.0);
-  graph->add(simulated2D::Measurement(z2, sigma, x2, l2));
+  graph.add(simulated2D::Measurement(z2, sigma, x2, l2));

   // equality constraint between l1 and l2
-  graph->add(eq2D::PointEqualityConstraint(l1, l2));
+  graph.add(eq2D::PointEqualityConstraint(l1, l2));

   // create an initial estimate
-  shared_values initialEstimate(new Values());
-  initialEstimate->insert(x1, Point2( 1.0, 1.0));
-  initialEstimate->insert(l1, Point2( 1.0, 6.0));
-  initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame
-  initialEstimate->insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin
+  Values initialEstimate;
+  initialEstimate.insert(x1, Point2( 1.0, 1.0));
+  initialEstimate.insert(l1, Point2( 1.0, 6.0));
+  initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame
+  initialEstimate.insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin

   // optimize
-  Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate);
+  Values actual = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();

   Values expected;
   expected.insert(x1, Point2(1.0, 1.0));
   expected.insert(l1, Point2(1.0, 6.0));
   expected.insert(l2, Point2(1.0, 6.0));
   expected.insert(x2, Point2(5.0, 6.0));
-  CHECK(assert_equal(expected, *actual, tol));
+  CHECK(assert_equal(expected, actual, tol));
 }

 // make a realistic calibration matrix
@@ -520,9 +508,7 @@ Cal3_S2 K(fov,w,h);
 boost::shared_ptr shK(new Cal3_S2(K));

 // typedefs for visual SLAM example
-typedef boost::shared_ptr shared_vconfig;
 typedef visualSLAM::Graph VGraph;
-typedef NonlinearOptimizer VOptimizer;

 // factors for visual slam
 typedef NonlinearEquality2 Point3Equality;
@@ -546,32 +532,32 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) {

   Symbol l1('l',1), l2('l',2);

   // create graph
-  VGraph::shared_graph graph(new VGraph());
+  VGraph graph;

   // create equality constraints for poses
-  graph->addPoseConstraint(1, camera1.pose());
-  graph->addPoseConstraint(2, camera2.pose());
+  graph.addPoseConstraint(1, camera1.pose());
+  graph.addPoseConstraint(2, camera2.pose());

   // create factors
   SharedDiagonal vmodel = noiseModel::Unit::Create(3);
-  graph->addMeasurement(camera1.project(landmark), vmodel, 1, 1, shK);
-  graph->addMeasurement(camera2.project(landmark), vmodel, 2, 2, shK);
+  graph.addMeasurement(camera1.project(landmark), vmodel, 1, 1, shK);
+  graph.addMeasurement(camera2.project(landmark), vmodel, 2, 2, shK);

   // add equality constraint
-  graph->add(Point3Equality(l1, l2));
+  graph.add(Point3Equality(l1, l2));

   // create initial data
   Point3 landmark1(0.5, 5.0, 0.0);
   Point3 landmark2(1.5, 5.0, 0.0);

-  shared_vconfig initValues(new Values());
-  initValues->insert(x1, pose1);
-  initValues->insert(x2, pose2);
-  initValues->insert(l1, landmark1);
-  initValues->insert(l2, landmark2);
+  Values initValues;
+  initValues.insert(x1, pose1);
+  initValues.insert(x2, pose2);
+  initValues.insert(l1, landmark1);
+  initValues.insert(l2, landmark2);

   // optimize
-  VOptimizer::shared_values actual = VOptimizer::optimizeLM(graph, initValues);
+  Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();

   // create config
   Values truthValues;
@@ -581,7 +567,7 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) {
   truthValues.insert(l2, landmark);

   // check if correct
-  CHECK(assert_equal(truthValues, *actual, 1e-5));
+  CHECK(assert_equal(truthValues, actual, 1e-5));
 }

 /* ************************************************************************* */
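Every test in this file is migrated with the same recipe, worth stating once: the shared-pointer plumbing and the templated NonlinearOptimizer disappear, and a concrete optimizer is constructed and run in a single expression. A sketch with hypothetical graph and initial values (the two- and three-argument constructors are the forms used throughout this patch):

  // Old style (removed): heap-allocated graph/values, a templated optimizer,
  // and an explicit levenbergMarquardt() entry point whose result had to be
  // dereferenced via values().
  //
  // New style (added): plain value types and a one-line call.
  NonlinearFactorGraph graph;
  Values init;
  // ... populate graph and init ...
  Values result = LevenbergMarquardtOptimizer(graph, init).optimize();

  // An explicit elimination ordering can still be supplied:
  Ordering ordering;
  ordering.push_back(Symbol('x',1));
  Values result2 = LevenbergMarquardtOptimizer(graph, init, ordering).optimize();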
diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp
index e397ea443..80754ce02 100644
--- a/tests/testNonlinearOptimizer.cpp
+++ b/tests/testNonlinearOptimizer.cpp
@@ -31,11 +31,9 @@ using namespace boost;

 #include
 #include
 #include
-
-// template definitions
 #include
-#include
-#include
+#include
+#include

 using namespace gtsam;
@@ -44,337 +42,219 @@ const double tol = 1e-5;

 Key kx(size_t i) { return Symbol('x',i); }
 Key kl(size_t i) { return Symbol('l',i); }

-typedef NonlinearOptimizer Optimizer;
-
-/* ************************************************************************* */
-TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta )
-{
-  shared_ptr fg(new example::Graph(
-      example::createNonlinearFactorGraph()));
-  Optimizer::shared_values initial = example::sharedNoisyValues();
-
-  // Expected values structure is the difference between the noisy config
-  // and the ground-truth config. One step only because it's linear !
-  Ordering ord1; ord1 += kx(2),kl(1),kx(1);
-  VectorValues expected(initial->dims(ord1));
-  Vector dl1(2);
-  dl1(0) = -0.1;
-  dl1(1) = 0.1;
-  expected[ord1[kl(1)]] = dl1;
-  Vector dx1(2);
-  dx1(0) = -0.1;
-  dx1(1) = -0.1;
-  expected[ord1[kx(1)]] = dx1;
-  Vector dx2(2);
-  dx2(0) = 0.1;
-  dx2(1) = -0.2;
-  expected[ord1[kx(2)]] = dx2;
-
-  // Check one ordering
-  Optimizer optimizer1(fg, initial, Optimizer::shared_ordering(new Ordering(ord1)));
-
-  VectorValues actual1 = optimizer1.linearizeAndOptimizeForDelta();
-  CHECK(assert_equal(actual1,expected));
-
-// SL-FIX // Check another
-//  shared_ptr ord2(new Ordering());
-//  *ord2 += kx(1),kx(2),kl(1);
-//  solver = Optimizer::shared_solver(new Optimizer::solver(ord2));
-//  Optimizer optimizer2(fg, initial, solver);
-//
-//  VectorValues actual2 = optimizer2.linearizeAndOptimizeForDelta();
-//  CHECK(assert_equal(actual2,expected));
-//
-//  // And yet another...
-//  shared_ptr ord3(new Ordering());
-//  *ord3 += kl(1),kx(1),kx(2);
-//  solver = Optimizer::shared_solver(new Optimizer::solver(ord3));
-//  Optimizer optimizer3(fg, initial, solver);
-//
-//  VectorValues actual3 = optimizer3.linearizeAndOptimizeForDelta();
-//  CHECK(assert_equal(actual3,expected));
-//
-//  // More...
-//  shared_ptr ord4(new Ordering());
-//  *ord4 += kx(1),kx(2), kl(1);
-//  solver = Optimizer::shared_solver(new Optimizer::solver(ord4));
-//  Optimizer optimizer4(fg, initial, solver);
-//
-//  VectorValues actual4 = optimizer4.linearizeAndOptimizeForDelta();
-//  CHECK(assert_equal(actual4,expected));
-}
-
 /* ************************************************************************* */
 TEST( NonlinearOptimizer, iterateLM )
 {
   // really non-linear factor graph
-  shared_ptr fg(new example::Graph(
-      example::createReallyNonlinearFactorGraph()));
+  example::Graph fg(example::createReallyNonlinearFactorGraph());

   // config far from minimum
   Point2 x0(3,0);
-  boost::shared_ptr config(new Values);
-  config->insert(simulated2D::PoseKey(1), x0);
-
-  // ordering
-  shared_ptr ord(new Ordering());
-  ord->push_back(kx(1));
-
-  // create initial optimization state, with lambda=0
-  Optimizer optimizer(fg, config, ord, NonlinearOptimizationParameters::newLambda(0.));
+  Values config;
+  config.insert(simulated2D::PoseKey(1), x0);

   // normal iterate
-  Optimizer iterated1 = optimizer.iterate();
+  GaussNewtonParams gnParams;
+  GaussNewtonOptimizer gnOptimizer(fg, config, gnParams);
+  gnOptimizer.iterate();

   // LM iterate with lambda 0 should be the same
-  Optimizer iterated2 = optimizer.iterateLM();
+  LevenbergMarquardtParams lmParams;
+  lmParams.lambdaInitial = 0.0;
+  LevenbergMarquardtOptimizer lmOptimizer(fg, config, lmParams);
+  lmOptimizer.iterate();

-  // Try successive iterates. TODO: ugly pointers, better way ?
-  Optimizer *pointer = new Optimizer(iterated2);
-  for (int i=0;i<10;i++) {
-    Optimizer* newOptimizer = new Optimizer(pointer->iterateLM());
-    delete pointer;
-    pointer = newOptimizer;
-  }
-  delete(pointer);
-
-  CHECK(assert_equal(*iterated1.values(), *iterated2.values(), 1e-9));
+  CHECK(assert_equal(gnOptimizer.values(), lmOptimizer.values(), 1e-9));
 }
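// Aside, not part of the patch: iterate() performs a single optimizer step,
// which the test above uses to compare one Gauss-Newton step against one LM
// step taken with lambdaInitial = 0. Because iterate() mutates the optimizer,
// a manual stepping loop is still possible -- a sketch with hypothetical
// fg and c0, replacing the deleted pointer-juggling loop:
//
//   LevenbergMarquardtOptimizer optimizer(fg, c0);
//   for (size_t i = 0; i < 10; ++i)
//     optimizer.iterate();                          // one LM step per call
//   double finalError = fg.error(optimizer.values());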

 /* ************************************************************************* */
 TEST( NonlinearOptimizer, optimize )
 {
-  shared_ptr fg(new example::Graph(
-      example::createReallyNonlinearFactorGraph()));
+  example::Graph fg(example::createReallyNonlinearFactorGraph());

   // test error at minimum
   Point2 xstar(0,0);
   Values cstar;
   cstar.insert(simulated2D::PoseKey(1), xstar);
-  DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
+  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);

   // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
   Point2 x0(3,3);
-  boost::shared_ptr c0(new Values);
-  c0->insert(simulated2D::PoseKey(1), x0);
-  DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
+  Values c0;
+  c0.insert(simulated2D::PoseKey(1), x0);
+  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);

   // optimize parameters
-  shared_ptr ord(new Ordering());
-  ord->push_back(kx(1));
-
-  // initial optimization state is the same in both cases tested
-  boost::shared_ptr params = boost::make_shared();
-  params->relDecrease_ = 1e-5;
-  params->absDecrease_ = 1e-5;
-  Optimizer optimizer(fg, c0, ord, params);
+  Ordering ord;
+  ord.push_back(kx(1));

   // Gauss-Newton
-  Optimizer actual1 = optimizer.gaussNewton();
-  DOUBLES_EQUAL(0,fg->error(*(actual1.values())),tol);
+  GaussNewtonParams gnParams;
+  gnParams.ordering = ord;
+  Values actual1 = GaussNewtonOptimizer(fg, c0, gnParams).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual1),tol);

   // Levenberg-Marquardt
-  Optimizer actual2 = optimizer.levenbergMarquardt();
-  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+  LevenbergMarquardtParams lmParams;
+  lmParams.ordering = ord;
+  Values actual2 = LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual2),tol);
+
+  // Dogleg
+  DoglegParams dlParams;
+  dlParams.ordering = ord;
+  Values actual3 = DoglegOptimizer(fg, c0, dlParams).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual3),tol);
 }

 /* ************************************************************************* */
 TEST( NonlinearOptimizer, SimpleLMOptimizer )
 {
-  shared_ptr fg(new example::Graph(
-      example::createReallyNonlinearFactorGraph()));
-
-  Point2 x0(3,3);
-  boost::shared_ptr c0(new Values);
-  c0->insert(simulated2D::PoseKey(1), x0);
-
-  Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0);
-  DOUBLES_EQUAL(0,fg->error(*actual),tol);
-}
-
-/* ************************************************************************* */
-TEST( NonlinearOptimizer, SimpleLMOptimizer_noshared )
-{
-  example::Graph fg = example::createReallyNonlinearFactorGraph();
+  example::Graph fg(example::createReallyNonlinearFactorGraph());

   Point2 x0(3,3);
   Values c0;
   c0.insert(simulated2D::PoseKey(1), x0);

-  Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0);
-  DOUBLES_EQUAL(0,fg.error(*actual),tol);
+  Values actual = LevenbergMarquardtOptimizer(fg, c0).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual),tol);
 }

 /* ************************************************************************* */
 TEST( NonlinearOptimizer, SimpleGNOptimizer )
 {
-  shared_ptr fg(new example::Graph(
-      example::createReallyNonlinearFactorGraph()));
+  example::Graph fg(example::createReallyNonlinearFactorGraph());

-  Point2 x0(3,3);
-  boost::shared_ptr c0(new Values);
-  c0->insert(simulated2D::PoseKey(1), x0);
+  Point2 x0(3,3);
+  Values c0;
+  c0.insert(simulated2D::PoseKey(1), x0);

-  Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0);
-  DOUBLES_EQUAL(0,fg->error(*actual),tol);
+  Values actual = GaussNewtonOptimizer(fg, c0).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual),tol);
 }

 /* ************************************************************************* */
-TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared )
+TEST( NonlinearOptimizer, SimpleDLOptimizer )
 {
-  example::Graph fg = example::createReallyNonlinearFactorGraph();
+  example::Graph fg(example::createReallyNonlinearFactorGraph());

-  Point2 x0(3,3);
-  Values c0;
-  c0.insert(simulated2D::PoseKey(1), x0);
+  Point2 x0(3,3);
+  Values c0;
+  c0.insert(simulated2D::PoseKey(1), x0);

-  Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0);
-  DOUBLES_EQUAL(0,fg.error(*actual),tol);
+  Values actual = DoglegOptimizer(fg, c0).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual),tol);
 }

 /* ************************************************************************* */
 TEST( NonlinearOptimizer, optimization_method )
 {
+  LevenbergMarquardtParams paramsQR;
+  paramsQR.factorization = LevenbergMarquardtParams::QR;
+  LevenbergMarquardtParams paramsLDL;
+  paramsLDL.factorization = LevenbergMarquardtParams::LDL;
+  LevenbergMarquardtParams paramsChol;
+  paramsChol.factorization = LevenbergMarquardtParams::CHOLESKY;
+
   example::Graph fg = example::createReallyNonlinearFactorGraph();

   Point2 x0(3,3);
   Values c0;
   c0.insert(simulated2D::PoseKey(1), x0);

-  Values actualMFQR = optimize(
-      fg, c0, *NonlinearOptimizationParameters().newFactorization(true), MULTIFRONTAL, LM);
+  Values actualMFQR = LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize();
   DOUBLES_EQUAL(0,fg.error(actualMFQR),tol);

-  Values actualMFLDL = optimize(
-      fg, c0, *NonlinearOptimizationParameters().newFactorization(false), MULTIFRONTAL, LM);
+  Values actualMFLDL = LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize();
   DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol);
+
+  Values actualMFChol = LevenbergMarquardtOptimizer(fg, c0, paramsChol).optimize();
+  DOUBLES_EQUAL(0,fg.error(actualMFChol),tol);
 }
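// Aside, not part of the patch: the factorization field selects the linear
// solver run inside each iteration -- QR is typically the most numerically
// robust but slowest option, LDL and Cholesky the faster ones. A sketch with
// hypothetical fg and c0:
//
//   LevenbergMarquardtParams params;
//   params.factorization = LevenbergMarquardtParams::QR;  // or LDL, CHOLESKY
//   Values result = LevenbergMarquardtOptimizer(fg, c0, params).optimize();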

 /* ************************************************************************* */
 TEST( NonlinearOptimizer, Factorization )
 {
-  typedef NonlinearOptimizer Optimizer;
+  Values config;
+  config.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.));
+  config.insert(pose2SLAM::PoseKey(2), Pose2(1.5,0.,0.));

-  boost::shared_ptr config(new Values);
-  config->insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.));
-  config->insert(pose2SLAM::PoseKey(2), Pose2(1.5,0.,0.));
+  pose2SLAM::Graph graph;
+  graph.addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10));
+  graph.addOdometry(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1));

-  boost::shared_ptr graph(new pose2SLAM::Graph);
-  graph->addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10));
-  graph->addOdometry(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1));
+  Ordering ordering;
+  ordering.push_back(pose2SLAM::PoseKey(1));
+  ordering.push_back(pose2SLAM::PoseKey(2));

-  boost::shared_ptr ordering(new Ordering);
-  ordering->push_back(pose2SLAM::PoseKey(1));
-  ordering->push_back(pose2SLAM::PoseKey(2));
-
-  Optimizer optimizer(graph, config, ordering);
-  Optimizer optimized = optimizer.iterateLM();
+  LevenbergMarquardtOptimizer optimizer(graph, config, ordering);
+  optimizer.iterate();

   Values expected;
   expected.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.));
   expected.insert(pose2SLAM::PoseKey(2), Pose2(1.,0.,0.));
-  CHECK(assert_equal(expected, *optimized.values(), 1e-5));
+  CHECK(assert_equal(expected, optimizer.values(), 1e-5));
 }

 /* ************************************************************************* */
-TEST_UNSAFE(NonlinearOptimizer, NullFactor) {
+TEST(NonlinearOptimizer, NullFactor) {

-  shared_ptr fg(new example::Graph(
-      example::createReallyNonlinearFactorGraph()));
+  example::Graph fg = example::createReallyNonlinearFactorGraph();

   // Add null factor
-  fg->push_back(example::Graph::sharedFactor());
+  fg.push_back(example::Graph::sharedFactor());

   // test error at minimum
   Point2 xstar(0,0);
   Values cstar;
   cstar.insert(simulated2D::PoseKey(1), xstar);
-  DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
+  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);

   // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
   Point2 x0(3,3);
-  boost::shared_ptr c0(new Values);
-  c0->insert(simulated2D::PoseKey(1), x0);
-  DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
+  Values c0;
+  c0.insert(simulated2D::PoseKey(1), x0);
+  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);

   // optimize parameters
-  shared_ptr ord(new Ordering());
-  ord->push_back(kx(1));
-
-  // initial optimization state is the same in both cases tested
-  boost::shared_ptr params = boost::make_shared();
-  params->relDecrease_ = 1e-5;
-  params->absDecrease_ = 1e-5;
-  Optimizer optimizer(fg, c0, ord, params);
+  Ordering ord;
+  ord.push_back(kx(1));

   // Gauss-Newton
-  Optimizer actual1 = optimizer.gaussNewton();
-  DOUBLES_EQUAL(0,fg->error(*(actual1.values())),tol);
+  Values actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual1),tol);

   // Levenberg-Marquardt
-  Optimizer actual2 = optimizer.levenbergMarquardt();
-  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
+  Values actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual2),tol);
+
+  // Dogleg
+  Values actual3 = DoglegOptimizer(fg, c0, ord).optimize();
+  DOUBLES_EQUAL(0,fg.error(actual3),tol);
 }

-///* ************************************************************************* */
-// SL-FIX TEST( NonlinearOptimizer, SubgraphSolver )
-//{
-//  using namespace pose2SLAM;
-//  typedef SubgraphSolver Solver;
-//  typedef NonlinearOptimizer Optimizer;
-//
-//  // Create a graph
-//  boost::shared_ptr graph(new Graph);
-//  graph->addPrior(1, Pose2(0., 0., 0.), noiseModel::Isotropic::Sigma(3, 1e-10));
-//  graph->addConstraint(1, 2, Pose2(1., 0., 0.), noiseModel::Isotropic::Sigma(3, 1));
-//
-//  // Create an initial config
-//  boost::shared_ptr config(new Values);
-//  config->insert(1, Pose2(0., 0., 0.));
-//  config->insert(2, Pose2(1.5, 0., 0.));
-//
-//  // Create solver and optimizer
-//  Optimizer::shared_solver solver
-//    (new SubgraphSolver (*graph, *config));
-//  Optimizer optimizer(graph, config, solver);
-//
-//  // Optimize !!!!
-//  double relativeThreshold = 1e-5;
-//  double absoluteThreshold = 1e-5;
-//  Optimizer optimized = optimizer.gaussNewton(relativeThreshold,
-//      absoluteThreshold, Optimizer::SILENT);
-//
-//  // Check solution
-//  Values expected;
-//  expected.insert(1, Pose2(0., 0., 0.));
-//  expected.insert(2, Pose2(1., 0., 0.));
-//  CHECK(assert_equal(expected, *optimized.values(), 1e-5));
-//}
-
 /* ************************************************************************* */
-// SL-FIX TEST( NonlinearOptimizer, MultiFrontalSolver )
-//{
-//  shared_ptr fg(new example::Graph(
-//    example::createNonlinearFactorGraph()));
-//  Optimizer::shared_values initial = example::sharedNoisyValues();
-//
-//  Values expected;
-//  expected.insert(simulated2D::PoseKey(1), Point2(0.0, 0.0));
-//  expected.insert(simulated2D::PoseKey(2), Point2(1.5, 0.0));
-//  expected.insert(simulated2D::PointKey(1), Point2(0.0, -1.0));
-//
-//  Optimizer::shared_solver solver;
-//
-//  // Check one ordering
-//  shared_ptr ord1(new Ordering());
-//  *ord1 += kx(2),kl(1),kx(1);
-//  solver = Optimizer::shared_solver(new Optimizer::solver(ord1));
-//  Optimizer optimizer1(fg, initial, solver);
-//
-//  Values actual = optimizer1.levenbergMarquardt();
-//  CHECK(assert_equal(actual,expected));
-//}
+TEST(NonlinearOptimizer, MoreOptimization) {
+  NonlinearFactorGraph fg;
+  fg.add(PriorFactor<Pose2>(0, Pose2(0,0,0), sharedSigma(3,1)));
+  fg.add(BetweenFactor<Pose2>(0, 1, Pose2(1,0,M_PI/2), sharedSigma(3,1)));
+  fg.add(BetweenFactor<Pose2>(1, 2, Pose2(1,0,M_PI/2), sharedSigma(3,1)));
+
+  Values init;
+  init.insert(0, Pose2(3,4,-M_PI));
+  init.insert(1, Pose2(10,2,-M_PI));
+  init.insert(2, Pose2(11,7,-M_PI));
+
+  Values expected;
+  expected.insert(0, Pose2(0,0,0));
+  expected.insert(1, Pose2(1,0,M_PI/2));
+  expected.insert(2, Pose2(1,1,M_PI));
+
+  // Try LM and Dogleg
+  EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(fg, init).optimize()));
+  EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));
 }

 /* ************************************************************************* */
 int main() {
diff --git a/tests/testRot3Optimization.cpp b/tests/testRot3Optimization.cpp
index 105bc8331..df2471933 100644
--- a/tests/testRot3Optimization.cpp
+++ b/tests/testRot3Optimization.cpp
@@ -23,7 +23,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include

@@ -47,8 +47,7 @@ TEST(Rot3, optimize) {
     fg.add(Between(Symbol('r',j), Symbol('r',(j+1)%6), Rot3::Rz(M_PI/3.0), sharedSigma(3, 0.01)));
   }

-  NonlinearOptimizationParameters params;
-  Values final = optimize(fg, initial, params);
+  Values final = GaussNewtonOptimizer(fg, initial).optimize();
   EXPECT(assert_equal(truth, final, 1e-5));
 }
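Taken together, the migrated tests settle on one compact end-to-end pattern: build a NonlinearFactorGraph, fill a Values with the initial estimate, optimize in a single expression, and query marginals from the solution. A self-contained sketch of that pattern, with hypothetical keys and measurements and headers omitted (marginalCovariance is assumed to be the single-variable companion of the jointMarginalCovariance calls tested above):

  NonlinearFactorGraph graph;
  graph.add(PriorFactor<Pose2>(Symbol('x',1), Pose2(0,0,0), sharedSigma(3, 0.1)));
  graph.add(BetweenFactor<Pose2>(Symbol('x',1), Symbol('x',2), Pose2(1,0,0), sharedSigma(3, 0.1)));

  Values init;
  init.insert(Symbol('x',1), Pose2(0.1, -0.1, 0.05));
  init.insert(Symbol('x',2), Pose2(1.2, 0.1, -0.05));

  // GaussNewtonOptimizer and DoglegOptimizer are drop-in alternatives,
  // each with its own Params type.
  Values result = LevenbergMarquardtOptimizer(graph, init).optimize();

  Marginals marginals(graph, result);
  Matrix cov = marginals.marginalCovariance(Symbol('x',1));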