Merged from branch 'branches/NLO' into trunk
commit 8b6ac387bc
@@ -22,7 +22,7 @@
 #include <gtsam/geometry/SimpleCamera.h>
 #include <gtsam/nonlinear/NonlinearFactor.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
-#include <gtsam/nonlinear/NonlinearOptimization.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 
 using namespace gtsam;
 

@@ -95,7 +95,7 @@ int main(int argc, char* argv[]) {
       0.,0.,-1.), Point3(0.,0.,2.0)));
 
   /* 4. Optimize the graph using Levenberg-Marquardt*/
-  Values result = optimize<NonlinearFactorGraph> (graph, initial);
+  Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
   result.print("Final result: ");
 
   return 0;
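
Note: the pattern above -- construct a LevenbergMarquardtOptimizer, then call
optimize() -- replaces the old free function optimize<NonlinearFactorGraph>().
A minimal sketch of the same call with explicit parameters; the wrapper
function is illustrative, not part of the change, and the defaults shown are
those declared in LevenbergMarquardtParams later in this diff:

    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

    using namespace gtsam;

    Values optimizeWithParams(const NonlinearFactorGraph& graph, const Values& initial) {
      LevenbergMarquardtParams params;   // defaults: lambdaInitial 1e-5, lambdaFactor 10.0
      params.lambdaUpperBound = 1e5;     // give up once lambda reaches this (also the default)
      return LevenbergMarquardtOptimizer(graph, initial, params).optimize();
    }
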
@@ -20,7 +20,7 @@
 
 // pull in the planar SLAM domain with all typedefs and helper functions defined
 #include <gtsam/slam/planarSLAM.h>
-#include <gtsam/nonlinear/NonlinearOptimization-inl.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 
 using namespace std;
 using namespace gtsam;

@@ -83,7 +83,7 @@ int main(int argc, char** argv) {
   initialEstimate.print("initial estimate");
 
   // optimize using Levenberg-Marquardt optimization with an ordering from colamd
-  planarSLAM::Values result = optimize(graph, initialEstimate);
+  planarSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();
   result.print("final result");
 
   return 0;
@@ -35,17 +35,12 @@
 
 // implementations for structures - needed if self-contained, and these should be included last
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
-#include <gtsam/nonlinear/NonlinearOptimizer.h>
-#include <gtsam/linear/GaussianSequentialSolver.h>
-#include <gtsam/linear/GaussianMultifrontalSolver.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
+#include <gtsam/nonlinear/Marginals.h>
 
 using namespace std;
 using namespace gtsam;
 
-// Main typedefs
-typedef NonlinearOptimizer<NonlinearFactorGraph,GaussianFactorGraph,GaussianSequentialSolver> OptimizerSeqential; // optimization engine for this domain
-typedef NonlinearOptimizer<NonlinearFactorGraph,GaussianFactorGraph,GaussianMultifrontalSolver> OptimizerMultifrontal; // optimization engine for this domain
-
 /**
  * In this version of the system we make the following assumptions:
  * - All values are axis aligned

@@ -61,14 +56,14 @@ int main(int argc, char** argv) {
   Symbol l1('l',1), l2('l',2);
 
   // create graph container and add factors to it
-  NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph);
+  NonlinearFactorGraph graph;
 
   /* add prior */
   // gaussian for prior
   SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1));
   Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin
   PriorFactor<Pose2> posePrior(x1, prior_measurement, prior_model); // create the factor
-  graph->add(posePrior); // add the factor to the graph
+  graph.add(posePrior); // add the factor to the graph
 
   /* add odometry */
   // general noisemodel for odometry

@@ -77,8 +72,8 @@ int main(int argc, char** argv) {
   // create between factors to represent odometry
   BetweenFactor<Pose2> odom12(x1, x2, odom_measurement, odom_model);
   BetweenFactor<Pose2> odom23(x2, x3, odom_measurement, odom_model);
-  graph->add(odom12); // add both to graph
-  graph->add(odom23);
+  graph.add(odom12); // add both to graph
+  graph.add(odom23);
 
   /* add measurements */
   // general noisemodel for measurements

@@ -98,41 +93,44 @@ int main(int argc, char** argv) {
   BearingRangeFactor<Pose2, Point2> meas32(x3, l2, bearing32, range32, meas_model);
 
   // add the factors
-  graph->add(meas11);
-  graph->add(meas21);
-  graph->add(meas32);
+  graph.add(meas11);
+  graph.add(meas21);
+  graph.add(meas32);
 
-  graph->print("Full Graph");
+  graph.print("Full Graph");
 
   // initialize to noisy points
-  boost::shared_ptr<Values> initial(new Values);
-  initial->insert(x1, Pose2(0.5, 0.0, 0.2));
-  initial->insert(x2, Pose2(2.3, 0.1,-0.2));
-  initial->insert(x3, Pose2(4.1, 0.1, 0.1));
-  initial->insert(l1, Point2(1.8, 2.1));
-  initial->insert(l2, Point2(4.1, 1.8));
+  Values initial;
+  initial.insert(x1, Pose2(0.5, 0.0, 0.2));
+  initial.insert(x2, Pose2(2.3, 0.1,-0.2));
+  initial.insert(x3, Pose2(4.1, 0.1, 0.1));
+  initial.insert(l1, Point2(1.8, 2.1));
+  initial.insert(l2, Point2(4.1, 1.8));
 
-  initial->print("initial estimate");
+  initial.print("initial estimate");
 
   // optimize using Levenberg-Marquardt optimization with an ordering from colamd
 
   // first using sequential elimination
-  OptimizerSeqential::shared_values resultSequential = OptimizerSeqential::optimizeLM(*graph, *initial);
-  resultSequential->print("final result (solved with a sequential solver)");
+  LevenbergMarquardtParams lmParams;
+  lmParams.elimination = LevenbergMarquardtParams::SEQUENTIAL;
+  Values resultSequential = LevenbergMarquardtOptimizer(graph, initial, lmParams).optimize();
+  resultSequential.print("final result (solved with a sequential solver)");
 
   // then using multifrontal, advanced interface
-  // Note how we create an optimizer, call LM, then we get access to covariances
-  Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial);
-  OptimizerMultifrontal optimizerMF(graph, initial, ordering);
-  OptimizerMultifrontal resultMF = optimizerMF.levenbergMarquardt();
-  resultMF.values()->print("final result (solved with a multifrontal solver)");
+  // Note that we keep the original optimizer object so we can use the COLAMD
+  // ordering it computes.
+  LevenbergMarquardtOptimizer optimizer(graph, initial);
+  Values resultMultifrontal = optimizer.optimize();
+  resultMultifrontal.print("final result (solved with a multifrontal solver)");
 
   // Print marginals covariances for all variables
-  print(resultMF.marginalCovariance(x1), "x1 covariance");
-  print(resultMF.marginalCovariance(x2), "x2 covariance");
-  print(resultMF.marginalCovariance(x3), "x3 covariance");
-  print(resultMF.marginalCovariance(l1), "l1 covariance");
-  print(resultMF.marginalCovariance(l2), "l2 covariance");
+  Marginals marginals(graph, resultMultifrontal, Marginals::CHOLESKY);
+  print(marginals.marginalCovariance(x1), "x1 covariance");
+  print(marginals.marginalCovariance(x2), "x2 covariance");
+  print(marginals.marginalCovariance(x3), "x3 covariance");
+  print(marginals.marginalCovariance(l1), "l1 covariance");
+  print(marginals.marginalCovariance(l2), "l2 covariance");
 
   return 0;
 }
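
Note: the hunk above also shows what replaces the old solver-templated
optimizers: the linear solver is now selected at runtime through the params
object instead of through a typedef. A short sketch of both settings; the
wrapper function is illustrative only:

    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

    using namespace gtsam;

    Values solveWith(const NonlinearFactorGraph& graph, const Values& initial, bool multifrontal) {
      LevenbergMarquardtParams params;
      params.elimination = multifrontal
          ? LevenbergMarquardtParams::MULTIFRONTAL  // junction-tree elimination
          : LevenbergMarquardtParams::SEQUENTIAL;   // elimination-tree based
      return LevenbergMarquardtOptimizer(graph, initial, params).optimize();
    }
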
@@ -22,7 +22,8 @@
 
 // pull in the Pose2 SLAM domain with all typedefs and helper functions defined
 #include <gtsam/slam/pose2SLAM.h>
-#include <gtsam/nonlinear/NonlinearOptimization.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
+#include <gtsam/nonlinear/Marginals.h>
 
 #include <gtsam/base/Vector.h>
 #include <gtsam/base/Matrix.h>

@@ -34,13 +35,13 @@ using namespace pose2SLAM;
 
 int main(int argc, char** argv) {
   /* 1. create graph container and add factors to it */
-  shared_ptr<Graph> graph(new Graph);
+  Graph graph;
 
   /* 2.a add prior */
   // gaussian for prior
   SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1));
   Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin
-  graph->addPrior(1, prior_measurement, prior_model); // add directly to graph
+  graph.addPrior(1, prior_measurement, prior_model); // add directly to graph
 
   /* 2.b add odometry */
   // general noisemodel for odometry

@@ -48,33 +49,36 @@ int main(int argc, char** argv) {
 
   /* Pose2 measurements take (x,y,theta), where theta is taken from the positive x-axis*/
   Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case)
-  graph->addOdometry(1, 2, odom_measurement, odom_model);
-  graph->addOdometry(2, 3, odom_measurement, odom_model);
-  graph->print("full graph");
+  graph.addOdometry(1, 2, odom_measurement, odom_model);
+  graph.addOdometry(2, 3, odom_measurement, odom_model);
+  graph.print("full graph");
 
   /* 3. Create the data structure to hold the initial estimate to the solution
    * initialize to noisy points */
-  shared_ptr<pose2SLAM::Values> initial(new pose2SLAM::Values);
-  initial->insertPose(1, Pose2(0.5, 0.0, 0.2));
-  initial->insertPose(2, Pose2(2.3, 0.1,-0.2));
-  initial->insertPose(3, Pose2(4.1, 0.1, 0.1));
-  initial->print("initial estimate");
+  pose2SLAM::Values initial;
+  initial.insertPose(1, Pose2(0.5, 0.0, 0.2));
+  initial.insertPose(2, Pose2(2.3, 0.1,-0.2));
+  initial.insertPose(3, Pose2(4.1, 0.1, 0.1));
+  initial.print("initial estimate");
 
   /* 4.2.1 Alternatively, you can go through the process step by step
    * Choose an ordering */
-  Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initial);
+  Ordering ordering = *graph.orderingCOLAMD(initial);
 
   /* 4.2.2 set up solver and optimize */
-  NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15);
-  Optimizer optimizer(graph, initial, ordering, params);
-  Optimizer optimizer_result = optimizer.levenbergMarquardt();
+  LevenbergMarquardtParams params;
+  params.absoluteErrorTol = 1e-15;
+  params.relativeErrorTol = 1e-15;
+  params.ordering = ordering;
+  LevenbergMarquardtOptimizer optimizer(graph, initial, params);
 
-  pose2SLAM::Values result = *optimizer_result.values();
+  pose2SLAM::Values result = optimizer.optimize();
   result.print("final result");
 
   /* Get covariances */
-  Matrix covariance1 = optimizer_result.marginalCovariance(PoseKey(1));
-  Matrix covariance2 = optimizer_result.marginalCovariance(PoseKey(2));
+  Marginals marginals(graph, result, Marginals::CHOLESKY);
+  Matrix covariance1 = marginals.marginalCovariance(PoseKey(1));
+  Matrix covariance2 = marginals.marginalCovariance(PoseKey(2));
 
   print(covariance1, "Covariance1");
   print(covariance2, "Covariance2");
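
Note on params.ordering in the hunk above: the field is optional. When it is
left unset, the optimizer's ensureHasOrdering() helper (see the new headers
later in this diff) computes the same COLAMD ordering internally, so setting
it explicitly is only needed to pin or reuse an ordering:

    // Either pin the ordering explicitly (as the example above does)...
    LevenbergMarquardtParams params;
    params.ordering = *graph.orderingCOLAMD(initial);  // dereference the returned shared_ptr
    LevenbergMarquardtOptimizer optimizer(graph, initial, params);
    // ...or omit params.ordering and let the optimizer compute it itself.
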
@@ -24,7 +24,7 @@
 
 // pull in the Pose2 SLAM domain with all typedefs and helper functions defined
 #include <gtsam/slam/pose2SLAM.h>
-#include <gtsam/nonlinear/NonlinearOptimization.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 
 using namespace std;
 using namespace gtsam;

@@ -61,7 +61,7 @@ int main(int argc, char** argv) {
 
   /* 4 Single Step Optimization
    * optimize using Levenberg-Marquardt optimization with an ordering from colamd */
-  pose2SLAM::Values result = optimize<Graph>(graph, initial);
+  pose2SLAM::Values result = graph.optimize(initial);
   result.print("final result");
 
 
@@ -24,7 +24,7 @@
 #include <gtsam/linear/NoiseModel.h>
 #include <gtsam/nonlinear/Symbol.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
-#include <gtsam/nonlinear/NonlinearOptimization.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 
 /*
  * TODO: make factors independent of RotValues

@@ -105,7 +105,7 @@ int main() {
    * initial estimate. This will yield a new RotValues structure
    * with the final state of the optimization.
    */
-  Values result = optimize(graph, initial);
+  Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
   result.print("final result");
 
   return 0;
@@ -20,7 +20,7 @@
 using namespace boost;
 
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
-#include <gtsam/nonlinear/NonlinearOptimizer.h>
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 #include <gtsam/slam/visualSLAM.h>
 #include <gtsam/slam/PriorFactor.h>
 

@@ -129,26 +129,27 @@ int main(int argc, char* argv[]) {
   readAllData();
 
   // Create a graph using the 2D measurements (features) and the calibration data
-  boost::shared_ptr<Graph> graph(new Graph(setupGraph(g_measurements, measurementSigma, g_calib)));
+  Graph graph(setupGraph(g_measurements, measurementSigma, g_calib));
 
   // Create an initial Values structure using groundtruth values as the initial estimates
-  boost::shared_ptr<Values> initialEstimates(new Values(initialize(g_landmarks, g_poses)));
+  Values initialEstimates(initialize(g_landmarks, g_poses));
   cout << "*******************************************************" << endl;
-  initialEstimates->print("INITIAL ESTIMATES: ");
+  initialEstimates.print("INITIAL ESTIMATES: ");
 
   // Add prior factor for all poses in the graph
   map<int, Pose3>::iterator poseit = g_poses.begin();
   for (; poseit != g_poses.end(); poseit++)
-    graph->addPosePrior(poseit->first, poseit->second, noiseModel::Unit::Create(1));
+    graph.addPosePrior(poseit->first, poseit->second, noiseModel::Unit::Create(1));
 
   // Optimize the graph
   cout << "*******************************************************" << endl;
-  NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newVerbosity(Optimizer::Parameters::DAMPED);
-  visualSLAM::Optimizer::shared_values result = visualSLAM::Optimizer::optimizeGN(graph, initialEstimates, params);
+  LevenbergMarquardtParams params;
+  params.verbosityLM = LevenbergMarquardtParams::DAMPED;
+  visualSLAM::Values result = LevenbergMarquardtOptimizer(graph, initialEstimates, params).optimize();
 
   // Print final results
   cout << "*******************************************************" << endl;
-  result->print("FINAL RESULTS: ");
+  result.print("FINAL RESULTS: ");
 
   return 0;
 }
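
Note: DAMPED, used above, is the most detailed of the LM-specific verbosity
levels declared later in this diff (SILENT, LAMBDA, TRYLAMBDA, TRYCONFIG,
TRYDELTA, DAMPED); it prints the entire damped linear system on every attempt,
so it is only practical on small problems. A lighter, assumed-typical setup:

    LevenbergMarquardtParams params;
    params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA;  // log each lambda tried
    params.verbosity = NonlinearOptimizerParams::ERROR;        // base-class level; enables give-up warnings
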
@@ -120,7 +120,7 @@ namespace gtsam {
   /* ************************************************************************* */
 
   /// @}
-  /// @name Transformations
+  /// @name Transformations and measurement functions
   /// @{
 
   /**

@@ -147,6 +147,36 @@
    */
   static Point3 backproject_from_camera(const Point2& p, const double scale);
 
+  /**
+   * Calculate range to a landmark
+   * @param point 3D location of landmark
+   * @return range (double)
+   */
+  double range(const Point3& point,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(point, H1, H2); }
+
+  /**
+   * Calculate range to another pose
+   * @param pose Other SO(3) pose
+   * @return range (double)
+   */
+  double range(const Pose3& pose,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(pose, H1, H2); }
+
+  /**
+   * Calculate range to another camera
+   * @param camera Other camera
+   * @return range (double)
+   */
+  double range(const CalibratedCamera& camera,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(camera.pose_, H1, H2); }
+
 private:
 
   /// @}
@@ -161,9 +161,9 @@ namespace gtsam {
     return PinholeCamera(pose3, K);
   }
 
-  /* ************************************************************************* */
-  // measurement functions and derivatives
-  /* ************************************************************************* */
+  /// @}
+  /// @name Transformations and measurement functions
+  /// @{
 
   /**
    * projects a 3-dimensional point in camera coordinates into the

@@ -186,10 +186,6 @@ namespace gtsam {
     return std::make_pair(k_.uncalibrate(pn), pc.z()>0);
   }
 
-  /// @}
-  /// @name Transformations
-  /// @{
-
   /** project a point from world coordinate to the image
    * @param pw is a point in the world coordinate
    * @param H1 is the jacobian w.r.t. pose3

@@ -270,6 +266,77 @@ namespace gtsam {
     return backproject(pi, scale);
   }
 
+  /**
+   * Calculate range to a landmark
+   * @param point 3D location of landmark
+   * @return range (double)
+   */
+  double range(const Point3& point,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    double result = pose_.range(point, H1, H2);
+    if(H1) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H1r(*H1);
+      H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim());
+      H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim());
+    }
+    return result;
+  }
+
+  /**
+   * Calculate range to another pose
+   * @param pose Other SO(3) pose
+   * @return range (double)
+   */
+  double range(const Pose3& pose,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    double result = pose_.range(pose, H1, H2);
+    if(H1) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H1r(*H1);
+      H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim());
+      H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim());
+    }
+    return result;
+  }
+
+  /**
+   * Calculate range to another camera
+   * @param camera Other camera
+   * @return range (double)
+   */
+  template<class CalibrationB>
+  double range(const PinholeCamera<CalibrationB>& camera,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    double result = pose_.range(camera.pose_, H1, H2);
+    if(H1) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H1r(*H1);
+      H1r.conservativeResize(Eigen::NoChange, pose_.dim() + k_.dim());
+      H1r.block(0, pose_.dim(), 1, k_.dim()) = Matrix::Zero(1, k_.dim());
+    }
+    if(H2) {
+      // Add columns of zeros to Jacobian for calibration
+      Matrix& H2r(*H2);
+      H2r.conservativeResize(Eigen::NoChange, camera.pose().dim() + camera.calibration().dim());
+      H2r.block(0, camera.pose().dim(), 1, camera.calibration().dim()) = Matrix::Zero(1, camera.calibration().dim());
+    }
+    return result;
+  }
+
+  /**
+   * Calculate range to another camera
+   * @param camera Other camera
+   * @return range (double)
+   */
+  double range(const CalibratedCamera& camera,
+      boost::optional<Matrix&> H1=boost::none,
+      boost::optional<Matrix&> H2=boost::none) const {
+    return pose_.range(camera.pose_, H1, H2); }
+
 private:
 
   /// @}
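
Note: the if(H1)/if(H2) blocks above all implement one idiom: the pose-only
range Jacobian is widened with zero columns for the calibration parameters,
on which range does not depend. Eigen's conservativeResize preserves existing
coefficients but leaves the new columns uninitialized, so the explicit zero
fill is required, not cosmetic. The idiom in isolation, as a standalone sketch:

    #include <Eigen/Dense>

    // Widen a Jacobian with zero columns for parameters the measurement
    // does not depend on.
    void padJacobian(Eigen::MatrixXd& H, int extraCols) {
      H.conservativeResize(Eigen::NoChange, H.cols() + extraCols);
      H.rightCols(extraCols).setZero();  // new columns start uninitialized
    }
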
@@ -217,10 +217,10 @@ namespace gtsam {
 
   /**
    * Calculate range to another pose
-   * @param point SO(3) pose of landmark
+   * @param pose Other SO(3) pose
    * @return range (double)
    */
-  double range(const Pose3& point,
+  double range(const Pose3& pose,
       boost::optional<Matrix&> H1=boost::none,
       boost::optional<Matrix&> H2=boost::none) const;
 
@@ -81,7 +81,7 @@ public:
   }
 
   /** BayesNet with 1 conditional */
-  BayesNet(const sharedConditional& conditional) { push_back(conditional); }
+  explicit BayesNet(const sharedConditional& conditional) { push_back(conditional); }
 
   /// @}
   /// @name Testable
@@ -145,7 +145,7 @@ namespace gtsam {
   BayesTree() {}
 
   /** Create a Bayes Tree from a Bayes Net (requires CONDITIONAL is IndexConditional *or* CONDITIONAL::Combine) */
-  BayesTree(const BayesNet<CONDITIONAL>& bayesNet);
+  explicit BayesTree(const BayesNet<CONDITIONAL>& bayesNet);
 
   /** Copy constructor */
   BayesTree(const This& other);
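
Note: both one-argument constructors above gain explicit for the same reason:
without it, a BayesNet silently converts to a BayesTree (and a conditional to
a BayesNet) wherever one is expected. A self-contained illustration with
stand-in types:

    struct Net {};
    struct Tree {
      explicit Tree(const Net& net) { (void)net; }
    };

    inline void consume(const Tree& tree) { (void)tree; }

    void demo(const Net& net) {
      consume(Tree(net));  // fine: the conversion is spelled out
      // consume(net);     // no longer compiles: implicit Net -> Tree conversion
    }
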
@@ -42,7 +42,7 @@ void GaussianMultifrontalSolver::replaceFactors(const FactorGraph<GaussianFactor
 }
 
 /* ************************************************************************* */
-BayesTree<GaussianConditional>::shared_ptr GaussianMultifrontalSolver::eliminate() const {
+GaussianBayesTree::shared_ptr GaussianMultifrontalSolver::eliminate() const {
   if (useQR_)
     return Base::eliminate(&EliminateQR);
   else
@@ -18,6 +18,7 @@
 #pragma once
 
 #include <gtsam/inference/GenericMultifrontalSolver.h>
+#include <gtsam/linear/GaussianBayesTree.h>
 #include <gtsam/linear/GaussianJunctionTree.h>
 #include <gtsam/linear/VectorValues.h>
 

@@ -86,7 +87,7 @@ public:
    * Eliminate the factor graph sequentially. Uses a column elimination tree
    * to recursively eliminate.
    */
-  BayesTree<GaussianConditional>::shared_ptr eliminate() const;
+  GaussianBayesTree::shared_ptr eliminate() const;
 
   /**
    * Compute the least-squares solution of the GaussianFactorGraph. This
@@ -41,7 +41,7 @@ int main(int argc, char *argv[]) {
 
   size_t vardim = 3;
   size_t blockdim = 3;
-  size_t nVars = 500;
+  int nVars = 500;
   size_t blocksPerVar = 5;
   size_t varsPerBlock = 2;
   size_t varSpread = 10;

@@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
   for(size_t trial=0; trial<nTrials; ++trial) {
     blockGfgs.push_back(GaussianFactorGraph());
     SharedDiagonal noise = sharedSigma(blockdim, 1.0);
-    for(size_t c=0; c<nVars; ++c) {
+    for(int c=0; c<nVars; ++c) {
       for(size_t d=0; d<blocksPerVar; ++d) {
         vector<pair<Index, Matrix> > terms; terms.reserve(varsPerBlock);
         if(c == 0 && d == 0)
@@ -1,7 +0,0 @@
-/**
- * @file DoglegOptimizer-inl.h
- * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm
- * @author Richard Roberts
- */
-
-#pragma once
@@ -0,0 +1,68 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file DoglegOptimizer.cpp
+ * @brief
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#include <gtsam/nonlinear/DoglegOptimizer.h>
+
+#include <gtsam/inference/EliminationTree.h>
+#include <gtsam/linear/GaussianJunctionTree.h>
+#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
+
+using namespace std;
+
+namespace gtsam {
+
+/* ************************************************************************* */
+void DoglegOptimizer::iterate(void) {
+
+  // Linearize graph
+  const Ordering& ordering = *params_.ordering;
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering);
+
+  // Get elimination method
+  GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction();
+
+  // Pull out parameters we'll use
+  const bool dlVerbose = (params_.verbosityDL > DoglegParams::SILENT);
+
+  // Do Dogleg iteration with either Multifrontal or Sequential elimination
+  DoglegOptimizerImpl::IterationResult result;
+
+  if(params_.elimination == DoglegParams::MULTIFRONTAL) {
+    GaussianBayesTree bt;
+    bt.insert(GaussianJunctionTree(*linear).eliminate(eliminationMethod));
+    result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, bt, graph_, state_.values, ordering, state_.error, dlVerbose);
+
+  } else if(params_.elimination == DoglegParams::SEQUENTIAL) {
+    GaussianBayesNet::shared_ptr bn = EliminationTree<GaussianFactor>::Create(*linear)->eliminate(eliminationMethod);
+    result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);
+
+  } else {
+    throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination");
+  }
+
+  // Maybe show output
+  if(params_.verbosity >= NonlinearOptimizerParams::DELTA) result.dx_d.print("delta");
+
+  // Create new state with new values and new error
+  state_.values = state_.values.retract(result.dx_d, ordering);
+  state_.error = result.f_error;
+  state_.Delta = result.Delta;
+  ++state_.iterations;
+}
+
+} /* namespace gtsam */
@@ -1,67 +1,154 @@
 /* ----------------------------------------------------------------------------
 
  * GTSAM Copyright 2010, Georgia Tech Research Corporation,
  * Atlanta, Georgia 30332-0415
  * All Rights Reserved
  * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 
  * See LICENSE for the license information
 
  * -------------------------------------------------------------------------- */
 
 /**
  * @file DoglegOptimizer.h
- * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm
+ * @brief
  * @author Richard Roberts
+ * @created Feb 26, 2012
  */
 
 #pragma once
 
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
 
 namespace gtsam {
 
-/**
- * A class to perform nonlinear optimization using Powell's dogleg algorithm.
- * This class is functional, meaning every method is \c const, and returns a new
- * copy of the class.
- *
- * \tparam VALUES The Values or TupleValues type to hold the values to be
- * estimated.
- *
- * \tparam GAUSSIAN_SOLVER The linear solver to use at each iteration,
- * currently either GaussianSequentialSolver or GaussianMultifrontalSolver.
- * The latter is typically faster, especially for non-trivial problems.
- */
-template<class VALUES, class GAUSSIAN_SOLVER>
-class DoglegOptimizer {
+class DoglegOptimizer;
+
+/** Parameters for dogleg optimization. Note that this parameters
+ * class inherits from NonlinearOptimizerParams, which specifies the parameters
+ * common to all nonlinear optimization algorithms. This class also contains
+ * all of those parameters.
+ */
+class DoglegParams : public SuccessiveLinearizationParams {
+public:
+  /** See DoglegParams::verbosityDL */
+  enum VerbosityDL {
+    SILENT,
+    VERBOSE
+  };
+
+  double deltaInitial; ///< The initial trust region radius (default: 1.0)
+  VerbosityDL verbosityDL; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity
+
+  DoglegParams() :
+    deltaInitial(1.0), verbosityDL(SILENT) {}
+
+  virtual ~DoglegParams() {}
+
+  virtual void print(const std::string& str = "") const {
+    SuccessiveLinearizationParams::print(str);
+    std::cout << " deltaInitial: " << deltaInitial << "\n";
+    std::cout.flush();
+  }
+};
+
+/**
+ * State for DoglegOptimizer
+ */
+class DoglegState : public NonlinearOptimizerState {
+public:
+
+  double Delta;
+
+  DoglegState() {}
+
+  virtual ~DoglegState() {}
+
+protected:
+  DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations), Delta(params.deltaInitial) {}
 
-  typedef DoglegOptimizer<VALUES, GAUSSIAN_SOLVER> This; ///< Typedef to this class
+  friend class DoglegOptimizer;
+};
 
-  const sharedGraph graph_;
-  const sharedValues values_;
-  const double error_;
+/**
+ * This class performs Dogleg nonlinear optimization
+ */
+class DoglegOptimizer : public NonlinearOptimizer {
 
 public:
 
-  typedef VALUES ValuesType; ///< Typedef of the VALUES template parameter
-  typedef GAUSSIAN_SOLVER SolverType; ///< Typedef of the GAUSSIAN_SOLVER template parameter
-  typedef NonlinearFactorGraph<VALUES> GraphType; ///< A nonlinear factor graph templated on ValuesType
+  typedef boost::shared_ptr<DoglegOptimizer> shared_ptr;
 
-  typedef boost::shared_ptr<const GraphType> sharedGraph; ///< A shared_ptr to GraphType
-  typedef boost::shared_ptr<const ValuesType> sharedValues; ///< A shared_ptr to ValuesType
+  /// @name Standard interface
+  /// @{
 
-  /**
-   * Construct a DoglegOptimizer from the factor graph to optimize and the
-   * initial estimate of the variable values, using the default variable
-   * ordering method, currently COLAMD.
-   * @param graph The factor graph to optimize
-   * @param initialization An initial estimate of the variable values
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
    */
-  DoglegOptimizer(sharedGraph graph, sharedValues initialization);
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const DoglegParams& params = DoglegParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues, params_) {}
 
-  /**
-   * Construct a DoglegOptimizer from the factor graph to optimize and the
-   * initial estimate of the variable values, using the default variable
-   * ordering method, currently COLAMD. (this non-shared-pointer version
-   * incurs a performance hit for copying, see DoglegOptimizer(sharedGraph, sharedValues)).
-   * @param graph The factor graph to optimize
-   * @param initialization An initial estimate of the variable values
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param ordering The variable elimination ordering to use
    */
-  DoglegOptimizer(const GraphType& graph, const ValuesType& initialization);
+  DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph) {
+    params_.ordering = ordering;
+    state_ = DoglegState(graph, initialValues, params_); }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~DoglegOptimizer() {}
+
+  /** Perform a single iteration, updating the variable assignments and error
+   * stored in the optimizer state, which may be retrieved with state().
+   */
+  virtual void iterate();
+
+  /** Access the parameters */
+  const DoglegParams& params() const { return params_; }
+
+  /** Access the last state */
+  const DoglegState& state() const { return state_; }
+
+  /** Access the current trust region radius Delta */
+  double Delta() const { return state_.Delta; }
+
+  /// @}
+
+protected:
+  DoglegParams params_;
+  DoglegState state_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = *graph.orderingCOLAMD(values);
+    return params;
+  }
 };
 
 }
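
Note: a usage sketch for the new class, parallel to the LM examples earlier in
this diff. optimize() is assumed here to be inherited from the NonlinearOptimizer
base, as it is for LevenbergMarquardtOptimizer in the example programs above:

    #include <iostream>
    #include <gtsam/nonlinear/DoglegOptimizer.h>

    using namespace gtsam;

    Values solveDogleg(const NonlinearFactorGraph& graph, const Values& initial) {
      DoglegParams params;
      params.deltaInitial = 1.0;                   // initial trust-region radius (the default)
      params.verbosityDL = DoglegParams::VERBOSE;  // per-iteration trust-region output
      DoglegOptimizer optimizer(graph, initial, params);
      Values result = optimizer.optimize();
      std::cout << "final Delta: " << optimizer.Delta() << std::endl;
      return result;
    }
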
@@ -0,0 +1,56 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussNewtonOptimizer.cpp
+ * @brief
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#include <gtsam/inference/EliminationTree.h>
+#include <gtsam/linear/GaussianJunctionTree.h>
+#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
+
+using namespace std;
+
+namespace gtsam {
+
+/* ************************************************************************* */
+void GaussNewtonOptimizer::iterate() {
+
+  const NonlinearOptimizerState& current = state_;
+
+  // Linearize graph
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering);
+
+  // Optimize
+  VectorValues delta;
+  {
+    GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction();
+    if(params_.elimination == GaussNewtonParams::MULTIFRONTAL)
+      delta = GaussianJunctionTree(*linear).optimize(eliminationMethod);
+    else if(params_.elimination == GaussNewtonParams::SEQUENTIAL)
+      delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(*linear)->eliminate(eliminationMethod));
+    else
+      throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination");
+  }
+
+  // Maybe show output
+  if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta.print("delta");
+
+  // Create new state with new values and new error
+  state_.values = current.values.retract(delta, *params_.ordering);
+  state_.error = graph_.error(state_.values);
+  ++ state_.iterations;
+}
+
+} /* namespace gtsam */
@@ -0,0 +1,117 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussNewtonOptimizer.h
+ * @brief
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#pragma once
+
+#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
+
+namespace gtsam {
+
+class GaussNewtonOptimizer;
+
+/** Parameters for Gauss-Newton optimization, inherits from
+ * NonlinearOptimizationParams.
+ */
+class GaussNewtonParams : public SuccessiveLinearizationParams {
+};
+
+class GaussNewtonState : public NonlinearOptimizerState {
+protected:
+  GaussNewtonState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations) {}
+
+  friend class GaussNewtonOptimizer;
+};
+
+/**
+ * This class performs Gauss-Newton nonlinear optimization
+ */
+class GaussNewtonOptimizer : public NonlinearOptimizer {
+
+public:
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
+   */
+  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const GaussNewtonParams& params = GaussNewtonParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues) {}
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param ordering The variable elimination ordering to use
+   */
+  GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph), state_(graph, initialValues) {
+    params_.ordering = ordering; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~GaussNewtonOptimizer() {}
+
+  /** Perform a single iteration, updating the variable assignments and error
+   * stored in the optimizer state, which may be retrieved with state().
+   */
+  virtual void iterate();
+
+  /** Access the parameters */
+  const GaussNewtonParams& params() const { return params_; }
+
+  /** Access the last state */
+  const GaussNewtonState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  GaussNewtonParams params_;
+  GaussNewtonState state_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = *graph.orderingCOLAMD(values);
+    return params;
+  }
+
+};
+
+}
@@ -0,0 +1,127 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file LevenbergMarquardtOptimizer.cpp
+ * @brief
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
+
+#include <gtsam/base/cholesky.h> // For NegativeMatrixException
+#include <gtsam/inference/EliminationTree.h>
+#include <gtsam/linear/GaussianJunctionTree.h>
+
+using namespace std;
+
+namespace gtsam {
+
+/* ************************************************************************* */
+void LevenbergMarquardtOptimizer::iterate() {
+
+  // Linearize graph
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering);
+
+  // Get elimination method
+  GaussianFactorGraph::Eliminate eliminationMethod = params_.getEliminationFunction();
+
+  // Pull out parameters we'll use
+  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
+  const LevenbergMarquardtParams::VerbosityLM lmVerbosity = params_.verbosityLM;
+
+  // Keep increasing lambda until we make progress
+  while(true) {
+    if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
+      cout << "trying lambda = " << state_.lambda << endl;
+
+    // Add prior-factors
+    // TODO: replace this dampening with a backsubstitution approach
+    GaussianFactorGraph dampedSystem(*linear);
+    {
+      double sigma = 1.0 / sqrt(state_.lambda);
+      dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
+      // for each of the variables, add a prior
+      for(Index j=0; j<dimensions_.size(); ++j) {
+        size_t dim = (dimensions_)[j];
+        Matrix A = eye(dim);
+        Vector b = zero(dim);
+        SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
+        GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
+        dampedSystem.push_back(prior);
+      }
+    }
+    if (lmVerbosity >= LevenbergMarquardtParams::DAMPED) dampedSystem.print("damped");
+
+    // Try solving
+    try {
+
+      // Optimize
+      VectorValues delta;
+      if(params_.elimination == SuccessiveLinearizationParams::MULTIFRONTAL)
+        delta = GaussianJunctionTree(dampedSystem).optimize(eliminationMethod);
+      else if(params_.elimination == SuccessiveLinearizationParams::SEQUENTIAL)
+        delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(dampedSystem)->eliminate(eliminationMethod));
+      else
+        throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
+
+      if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl;
+      if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta");
+
+      // update values
+      Values newValues = state_.values.retract(delta, *params_.ordering);
+
+      // create new optimization state with more adventurous lambda
+      double error = graph_.error(newValues);
+
+      if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "next error = " << error << endl;
+
+      if(error <= state_.error) {
+        state_.values.swap(newValues);
+        state_.error = error;
+        state_.lambda /= params_.lambdaFactor;
+        break;
+      } else {
+        // Either we're not cautious, or the same lambda was worse than the current error.
+        // The more adventurous lambda was worse too, so make lambda more conservative
+        // and keep the same values.
+        if(state_.lambda >= params_.lambdaUpperBound) {
+          if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
+            cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
+          break;
+        } else {
+          state_.lambda *= params_.lambdaFactor;
+        }
+      }
+    } catch(const NegativeMatrixException& e) {
+      if(lmVerbosity >= LevenbergMarquardtParams::LAMBDA)
+        cout << "Negative matrix, increasing lambda" << endl;
+      // Either we're not cautious, or the same lambda was worse than the current error.
+      // The more adventurous lambda was worse too, so make lambda more conservative
+      // and keep the same values.
+      if(state_.lambda >= params_.lambdaUpperBound) {
+        if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
+          cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
+        break;
+      } else {
+        state_.lambda *= params_.lambdaFactor;
+      }
+    } catch(...) {
+      throw;
+    }
+  } // end while
+
+  // Increment the iteration counter
+  ++state_.iterations;
+}
+
+} /* namespace gtsam */
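
Note: the damping loop above implements classical Levenberg-Marquardt by
appending prior factors rather than by editing the Hessian directly. For each
variable of dimension d it adds a JacobianFactor with A = I, b = 0 and
isotropic sigma = 1/sqrt(lambda); since a Jacobian factor with noise sigma
contributes (1/sigma^2) A'A to the information matrix, each prior adds exactly
lambda*I, and the step delta solves the standard damped normal equations

    (J'J + lambda*I) delta = -J'r

Raising lambda therefore shortens the step toward gradient descent, and the
TODO in the code suggests replacing this factor-based dampening with a
backsubstitution approach.
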
@@ -0,0 +1,165 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file LevenbergMarquardtOptimizer.h
+ * @brief
+ * @author Richard Roberts
+ * @created Feb 26, 2012
+ */
+
+#pragma once
+
+#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
+
+namespace gtsam {
+
+class LevenbergMarquardtOptimizer;
+
+/** Parameters for Levenberg-Marquardt optimization. Note that this parameters
+ * class inherits from NonlinearOptimizerParams, which specifies the parameters
+ * common to all nonlinear optimization algorithms. This class also contains
+ * all of those parameters.
+ */
+class LevenbergMarquardtParams : public SuccessiveLinearizationParams {
+public:
+  /** See LevenbergMarquardtParams::verbosityLM */
+  enum VerbosityLM {
+    SILENT,
+    LAMBDA,
+    TRYLAMBDA,
+    TRYCONFIG,
+    TRYDELTA,
+    DAMPED
+  };
+
+  double lambdaInitial; ///< The initial Levenberg-Marquardt damping term (default: 1e-5)
+  double lambdaFactor; ///< The amount by which to multiply or divide lambda when adjusting lambda (default: 10.0)
+  double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5)
+  VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity
+
+  LevenbergMarquardtParams() :
+    lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM(SILENT) {}
+
+  virtual ~LevenbergMarquardtParams() {}
+
+  virtual void print(const std::string& str = "") const {
+    SuccessiveLinearizationParams::print(str);
+    std::cout << " lambdaInitial: " << lambdaInitial << "\n";
+    std::cout << " lambdaFactor: " << lambdaFactor << "\n";
+    std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n";
+    std::cout.flush();
+  }
+};
+
+/**
+ * State for LevenbergMarquardtOptimizer
+ */
+class LevenbergMarquardtState : public NonlinearOptimizerState {
+public:
+
+  double lambda;
+
+  LevenbergMarquardtState() {}
+
+  virtual ~LevenbergMarquardtState() {}
+
+protected:
+  LevenbergMarquardtState(const NonlinearFactorGraph& graph, const Values& initialValues, const LevenbergMarquardtParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, initialValues, iterations), lambda(params.lambdaInitial) {}
+
+  friend class LevenbergMarquardtOptimizer;
+};
+
+/**
+ * This class performs Levenberg-Marquardt nonlinear optimization
+ */
+class LevenbergMarquardtOptimizer : public NonlinearOptimizer {
+
+public:
+
+  typedef boost::shared_ptr<LevenbergMarquardtOptimizer> shared_ptr;
+
+  /// @name Standard interface
+  /// @{
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and optimization parameters.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param params The optimization parameters
+   */
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
+    NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)),
+    state_(graph, initialValues, params_), dimensions_(initialValues.dims(*params_.ordering)) {}
+
+  /** Standard constructor, requires a nonlinear factor graph, initial
+   * variable assignments, and a variable ordering.  For convenience this
+   * version takes plain objects instead of shared pointers, but internally
+   * copies the objects.
+   * @param graph The nonlinear factor graph to optimize
+   * @param initialValues The initial variable assignments
+   * @param ordering The variable elimination ordering to use
+   */
+  LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
+    NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) {
+    params_.ordering = ordering;
+    state_ = LevenbergMarquardtState(graph, initialValues, params_); }
+
+  /** Access the current damping value */
+  double lambda() const { return state_.lambda; }
+
+  /// @}
+
+  /// @name Advanced interface
+  /// @{
+
+  /** Virtual destructor */
+  virtual ~LevenbergMarquardtOptimizer() {}
+
+  /** Perform a single iteration, updating the variable assignments and error
+   * stored in the optimizer state, which may be retrieved with state().
+   */
+  virtual void iterate();
+
+  /** Access the parameters */
+  const LevenbergMarquardtParams& params() const { return params_; }
+
+  /** Access the last state */
+  const LevenbergMarquardtState& state() const { return state_; }
+
+  /// @}
+
+protected:
+
+  LevenbergMarquardtParams params_;
+  LevenbergMarquardtState state_;
+  std::vector<size_t> dimensions_;
+
+  /** Access the parameters (base class version) */
+  virtual const NonlinearOptimizerParams& _params() const { return params_; }
+
+  /** Access the state (base class version) */
+  virtual const NonlinearOptimizerState& _state() const { return state_; }
+
+  /** Internal function for computing a COLAMD ordering if no ordering is specified */
+  LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const {
+    if(!params.ordering)
+      params.ordering = *graph.orderingCOLAMD(values);
+    return params;
+  }
+};
+
+}
@ -0,0 +1,143 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file Marginals.cpp
|
||||
* @brief
|
||||
* @author Richard Roberts
|
||||
* @date May 14, 2012
|
||||
*/
|
||||
|
||||
#include <gtsam/3rdparty/Eigen/Eigen/Dense>
|
||||
#include <gtsam/linear/GaussianSequentialSolver.h>
|
||||
#include <gtsam/linear/GaussianMultifrontalSolver.h>
|
||||
#include <gtsam/nonlinear/Marginals.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization) {

  // Compute COLAMD ordering
  ordering_ = *graph.orderingCOLAMD(solution);

  // Linearize graph
  graph_ = *graph.linearize(solution, ordering_);

  // Store values
  values_ = solution;

  // Compute BayesTree
  factorization_ = factorization;
  if(factorization_ == CHOLESKY)
    bayesTree_ = *GaussianMultifrontalSolver(graph_, false).eliminate();
  else if(factorization_ == QR)
    bayesTree_ = *GaussianMultifrontalSolver(graph_, true).eliminate();
}

/* ************************************************************************* */
Matrix Marginals::marginalCovariance(Key variable) const {
  return marginalInformation(variable).inverse();
}

/* ************************************************************************* */
Matrix Marginals::marginalInformation(Key variable) const {
  // Get linear key
  Index index = ordering_[variable];

  // Compute marginal
  GaussianFactor::shared_ptr marginalFactor;
  if(factorization_ == CHOLESKY)
    marginalFactor = bayesTree_.marginalFactor(index, EliminatePreferCholesky);
  else if(factorization_ == QR)
    marginalFactor = bayesTree_.marginalFactor(index, EliminateQR);

  // Get information matrix (a HessianFactor stores only the upper triangle)
  if(typeid(*marginalFactor) == typeid(JacobianFactor)) {
    JacobianFactor::constABlock A = static_cast<const JacobianFactor&>(*marginalFactor).getA();
    return A.transpose() * A; // Compute A'A
  } else if(typeid(*marginalFactor) == typeid(HessianFactor)) {
    const HessianFactor& hessian = static_cast<const HessianFactor&>(*marginalFactor);
    const size_t dim = hessian.getDim(hessian.begin());
    return hessian.info().topLeftCorner(dim,dim).selfadjointView<Eigen::Upper>(); // Take the non-augmented part of the information matrix
  } else {
    throw runtime_error("Internal error: Marginals::marginalInformation expected either a JacobianFactor or HessianFactor");
  }
}

/* ************************************************************************* */
JointMarginal Marginals::jointMarginalCovariance(const std::vector<Key>& variables) const {
  JointMarginal info = jointMarginalInformation(variables);
  info.fullMatrix_ = info.fullMatrix_.inverse();
  return info;
}

/* ************************************************************************* */
JointMarginal Marginals::jointMarginalInformation(const std::vector<Key>& variables) const {

  // If 2 variables, we can use the BayesTree::joint function, otherwise we
  // have to use sequential elimination.
  if(variables.size() == 1) {
    Matrix info = marginalInformation(variables.front());
    std::vector<size_t> dims;
    dims.push_back(info.rows());
    Ordering indices;
    indices.insert(variables.front(), 0);
    return JointMarginal(info, dims, indices);

  } else {
    // Convert keys to linear indices
    vector<Index> indices(variables.size());
    for(size_t i=0; i<variables.size(); ++i) { indices[i] = ordering_[variables[i]]; }

    // Compute joint factor graph
    GaussianFactorGraph jointFG;
    if(variables.size() == 2) {
      if(factorization_ == CHOLESKY)
        jointFG = *bayesTree_.joint(indices[0], indices[1], EliminatePreferCholesky);
      else if(factorization_ == QR)
        jointFG = *bayesTree_.joint(indices[0], indices[1], EliminateQR);
    } else {
      if(factorization_ == CHOLESKY)
        jointFG = *GaussianSequentialSolver(graph_, false).jointFactorGraph(indices);
      else if(factorization_ == QR)
        jointFG = *GaussianSequentialSolver(graph_, true).jointFactorGraph(indices);
    }

    // Conversion from variable keys to position in factor graph variables,
    // which are sorted in index order.
    Ordering variableConversion;
    {
      FastMap<Index,Key> usedIndices;
      for(size_t i=0; i<variables.size(); ++i)
        usedIndices.insert(make_pair(indices[i], variables[i]));
      size_t slot = 0;
      typedef pair<Index,Key> Index_Key;
      BOOST_FOREACH(const Index_Key& index_key, usedIndices) {
        variableConversion.insert(index_key.second, slot);
        ++ slot;
      }
    }

    // Get dimensions from factor graph
    std::vector<size_t> dims(indices.size(), 0);
    for(size_t i = 0; i < variables.size(); ++i)
      dims[i] = values_.at(variables[i]).dim();

    // Get information matrix
    Matrix augmentedInfo = jointFG.denseHessian();
    Matrix info = augmentedInfo.topLeftCorner(augmentedInfo.rows()-1, augmentedInfo.cols()-1);

    return JointMarginal(info, dims, variableConversion);
  }
}

} /* namespace gtsam */
@ -0,0 +1,116 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file Marginals.h
 * @brief A class for computing marginals in a NonlinearFactorGraph
 * @author Richard Roberts
 * @date May 14, 2012
 */

#pragma once

#include <gtsam/base/blockMatrices.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

namespace gtsam {

class JointMarginal;

/**
 * A class for computing Gaussian marginals of variables in a NonlinearFactorGraph
 */
class Marginals {

public:

  /** The linear factorization mode - either CHOLESKY (faster and suitable for most problems) or QR (slower but more numerically stable for poorly-conditioned problems). */
  enum Factorization {
    CHOLESKY,
    QR
  };

  /** Construct a marginals class.
   * @param graph The factor graph defining the full joint density on all variables.
   * @param solution The linearization point about which to compute Gaussian marginals (usually the MLE as obtained from a NonlinearOptimizer).
   * @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
   */
  Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY);

  /** Compute the marginal covariance of a single variable */
  Matrix marginalCovariance(Key variable) const;

  /** Compute the marginal information matrix of a single variable. You can
   * use LLt(const Matrix&) or RtR(const Matrix&) to obtain the square-root information
   * matrix. */
  Matrix marginalInformation(Key variable) const;

  /** Compute the joint marginal covariance of several variables */
  JointMarginal jointMarginalCovariance(const std::vector<Key>& variables) const;

  /** Compute the joint marginal information of several variables */
  JointMarginal jointMarginalInformation(const std::vector<Key>& variables) const;

protected:

  GaussianFactorGraph graph_;
  Values values_;
  Ordering ordering_;
  Factorization factorization_;
  GaussianBayesTree bayesTree_;
};

/**
 * A class to store and access a joint marginal, returned from Marginals::jointMarginalCovariance and Marginals::jointMarginalInformation
 */
class JointMarginal {

protected:
  typedef SymmetricBlockView<Matrix> BlockView;

public:
  /** A block view of the joint marginal - this stores a reference to the
   * JointMarginal object, so the JointMarginal object must be kept in scope
   * while this block view is needed, otherwise assign this block object to a
   * Matrix to store it.
   */
  typedef BlockView::constBlock Block;

  /** Access a block, corresponding to a pair of variables, of the joint
   * marginal. Each block is accessed by its "vertical position",
   * corresponding to the variable with nonlinear Key \c iVariable and
   * "horizontal position", corresponding to the variable with nonlinear Key
   * \c jVariable.
   *
   * For example, if we have the joint marginal on a 2D pose "x3" and a 2D
   * landmark "l2", then jointMarginal(Symbol('x',3), Symbol('l',2)) will
   * return the 3x2 block of the joint covariance matrix corresponding to x3
   * and l2.
   * @param iVariable The nonlinear Key specifying the "vertical position" of the requested block
   * @param jVariable The nonlinear Key specifying the "horizontal position" of the requested block
   */
  Block operator()(Key iVariable, Key jVariable) const {
    return blockView_(indices_[iVariable], indices_[jVariable]); }

protected:
  Matrix fullMatrix_;
  BlockView blockView_;
  Ordering indices_;

  JointMarginal(const Matrix& fullMatrix, const std::vector<size_t>& dims, const Ordering& indices) :
    fullMatrix_(fullMatrix), blockView_(fullMatrix_, dims.begin(), dims.end()), indices_(indices) {}

  friend class Marginals;
};

} /* namespace gtsam */
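
A minimal usage sketch of the Marginals API declared above; it assumes `graph` is a NonlinearFactorGraph and `result` its optimized Values, and is illustrative rather than part of this commit.

// Sketch only: query marginals at the optimized solution.
Marginals marginals(graph, result, Marginals::CHOLESKY);

// Marginal covariance of a single variable
Matrix x1Cov = marginals.marginalCovariance(Symbol('x',1));

// Joint marginal on a pose and a landmark; assign a block to a Matrix to keep it
std::vector<Key> keys;
keys.push_back(Symbol('x',3));
keys.push_back(Symbol('l',2));
JointMarginal joint = marginals.jointMarginalCovariance(keys);
Matrix crossCov = joint(Symbol('x',3), Symbol('l',2)); // 3x2 block for a Pose2/Point2 pair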
@ -1,134 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file NonlinearOptimization-inl.h
 * @date Oct 17, 2010
 * @author Kai Ni
 * @brief Easy interfaces for NonlinearOptimizer
 */

#pragma once

#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianMultifrontalSolver.h>

#if ENABLE_SPCG
#include <gtsam/linear/SubgraphSolver.h>
#endif

#include <gtsam/nonlinear/NonlinearOptimizer.h>

using namespace std;

namespace gtsam {

/**
 * The elimination solver
 */
template<class G>
Values optimizeSequential(const G& graph, const Values& initialEstimate,
    const NonlinearOptimizationParameters& parameters, bool useLM) {

  // Use a variable ordering from COLAMD
  Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate);

  // Create a nonlinear optimizer that uses a sequential solver
  typedef NonlinearOptimizer<G, GaussianFactorGraph, GaussianSequentialSolver> Optimizer;
  Optimizer optimizer(boost::make_shared<const G>(graph),
      boost::make_shared<const Values>(initialEstimate), ordering,
      boost::make_shared<NonlinearOptimizationParameters>(parameters));

  // Now optimize using either LM or GN methods.
  if (useLM)
    return *optimizer.levenbergMarquardt().values();
  else
    return *optimizer.gaussNewton().values();
}

/**
 * The multifrontal solver
 */
template<class G>
Values optimizeMultiFrontal(const G& graph, const Values& initialEstimate,
    const NonlinearOptimizationParameters& parameters, bool useLM) {

  // Use a variable ordering from COLAMD
  Ordering::shared_ptr ordering = graph.orderingCOLAMD(initialEstimate);

  // Create a nonlinear optimizer that uses a multifrontal solver
  typedef NonlinearOptimizer<G, GaussianFactorGraph, GaussianMultifrontalSolver> Optimizer;
  Optimizer optimizer(boost::make_shared<const G>(graph),
      boost::make_shared<const Values>(initialEstimate), ordering,
      boost::make_shared<NonlinearOptimizationParameters>(parameters));

  // now optimize using either LM or GN methods
  if (useLM)
    return *optimizer.levenbergMarquardt().values();
  else
    return *optimizer.gaussNewton().values();
}

#if ENABLE_SPCG
/**
 * The sparse preconditioned conjugate gradient solver
 */
template<class G>
Values optimizeSPCG(const G& graph, const Values& initialEstimate,
    const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(),
    bool useLM = true) {

  // initial optimization state is the same in both cases tested
  typedef SubgraphSolver<G,GaussianFactorGraph,Values> Solver;
  typedef boost::shared_ptr<Solver> shared_Solver;
  typedef NonlinearOptimizer<G, GaussianFactorGraph, Solver> SPCGOptimizer;
  shared_Solver solver = boost::make_shared<Solver>(
      graph, initialEstimate, IterativeOptimizationParameters());
  SPCGOptimizer optimizer(
      boost::make_shared<const G>(graph),
      boost::make_shared<const Values>(initialEstimate),
      solver->ordering(),
      solver,
      boost::make_shared<NonlinearOptimizationParameters>(parameters));

  // choose nonlinear optimization method
  if (useLM)
    return *optimizer.levenbergMarquardt().values();
  else
    return *optimizer.gaussNewton().values();
}
#endif

/**
 * optimization that returns the values
 */
template<class G>
Values optimize(const G& graph, const Values& initialEstimate, const NonlinearOptimizationParameters& parameters,
    const LinearSolver& solver,
    const NonlinearOptimizationMethod& nonlinear_method) {
  switch (solver) {
  case SEQUENTIAL:
    return optimizeSequential<G>(graph, initialEstimate, parameters,
        nonlinear_method == LM);
  case MULTIFRONTAL:
    return optimizeMultiFrontal<G>(graph, initialEstimate, parameters,
        nonlinear_method == LM);
#if ENABLE_SPCG
  case SPCG:
//    return optimizeSPCG<G,Values>(graph, initialEstimate, parameters,
//        nonlinear_method == LM);
    throw runtime_error("optimize: SPCG not supported yet due to the specific pose constraint");
#endif
  }
  throw runtime_error("optimize: undefined solver");
}

} //namespace gtsam

@ -1,56 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file NonlinearOptimization.h
 * @date Oct 14, 2010
 * @author Kai Ni
 * @brief Easy interfaces for NonlinearOptimizer
 */

#pragma once

#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

namespace gtsam {

/**
 * all the nonlinear optimization methods
 */
typedef enum {
  LM, // Levenberg-Marquardt
  GN  // Gauss-Newton
} NonlinearOptimizationMethod;

/**
 * all the linear solver types
 */
typedef enum {
  SEQUENTIAL,   // Sequential elimination
  MULTIFRONTAL, // Multi-frontal elimination
#if ENABLE_SPCG
  SPCG,         // Subgraph Preconditioned Conjugate Gradient
#endif
} LinearSolver;

/**
 * optimization that returns the values
 */
template<class G>
Values optimize(const G& graph, const Values& initialEstimate,
    const NonlinearOptimizationParameters& parameters = NonlinearOptimizationParameters(),
    const LinearSolver& solver = MULTIFRONTAL,
    const NonlinearOptimizationMethod& nonlinear_method = LM);

}

#include <gtsam/nonlinear/NonlinearOptimization-inl.h>
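
A sketch of how this removed convenience interface was invoked, using the defaults declared above; `graph` and `initialEstimate` are assumed to be a NonlinearFactorGraph and its initial Values.

// Sketch only (removed API): equivalent calls with implicit and explicit defaults.
Values result = optimize<NonlinearFactorGraph>(graph, initialEstimate);
Values result2 = optimize<NonlinearFactorGraph>(graph, initialEstimate,
    NonlinearOptimizationParameters(), MULTIFRONTAL, LM);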

@ -1,121 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file NonlinearOptimizationParameters.cpp
 * @date Jan 28, 2012
 * @author Alex Cunningham
 * @brief Implements parameters structure
 */

#include <iostream>

#include <boost/make_shared.hpp>

#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

namespace gtsam {

using namespace std;

/* ************************************************************************* */
NonlinearOptimizationParameters::NonlinearOptimizationParameters() :
    absDecrease_(1e-6), relDecrease_(1e-6), sumError_(0.0), maxIterations_(100),
    lambda_(1e-5), lambdaFactor_(10.0), verbosity_(SILENT), lambdaMode_(BOUNDED),
    useQR_(false) {
}

/* ************************************************************************* */
NonlinearOptimizationParameters::NonlinearOptimizationParameters(double absDecrease, double relDecrease,
    double sumError, int iIters, double lambda,
    double lambdaFactor, verbosityLevel v,
    LambdaMode lambdaMode, bool useQR) :
    absDecrease_(absDecrease), relDecrease_(relDecrease), sumError_(sumError),
    maxIterations_(iIters), lambda_(lambda), lambdaFactor_(lambdaFactor),
    verbosity_(v), lambdaMode_(lambdaMode), useQR_(useQR) {
}

/* ************************************************************************* */
NonlinearOptimizationParameters::NonlinearOptimizationParameters(
    const NonlinearOptimizationParameters &parameters) :
    absDecrease_(parameters.absDecrease_), relDecrease_(parameters.relDecrease_),
    sumError_(parameters.sumError_), maxIterations_(parameters.maxIterations_),
    lambda_(parameters.lambda_), lambdaFactor_(parameters.lambdaFactor_),
    verbosity_(parameters.verbosity_), lambdaMode_(parameters.lambdaMode_),
    useQR_(parameters.useQR_) {
}

/* ************************************************************************* */
void NonlinearOptimizationParameters::print(const std::string& s) const {
  cout << "NonlinearOptimizationParameters " << s << endl;
  cout << "absolute decrease threshold: " << absDecrease_ << endl;
  cout << "relative decrease threshold: " << relDecrease_ << endl;
  cout << "        error sum threshold: " << sumError_ << endl;
  cout << "  maximum nr. of iterations: " << maxIterations_ << endl;
  cout << "       initial lambda value: " << lambda_ << endl;
  cout << "  factor to multiply lambda: " << lambdaFactor_ << endl;
  cout << "            verbosity level: " << verbosity_ << endl;
  cout << "                lambda mode: " << lambdaMode_ << endl;
  cout << "                     use QR: " << useQR_ << endl;
}

/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newLambda_(double lambda) const {
  shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>(*this));
  ptr->lambda_ = lambda;
  return ptr;
}

/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newVerbosity(verbosityLevel verbosity) {
  shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
  ptr->verbosity_ = verbosity;
  return ptr;
}

/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newMaxIterations(int maxIterations) {
  shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
  ptr->maxIterations_ = maxIterations;
  return ptr;
}

/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newLambda(double lambda) {
  shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
  ptr->lambda_ = lambda;
  return ptr;
}

/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newDecreaseThresholds(double absDecrease,
    double relDecrease) {
  shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
  ptr->absDecrease_ = absDecrease;
  ptr->relDecrease_ = relDecrease;
  return ptr;
}

/* ************************************************************************* */
NonlinearOptimizationParameters::shared_ptr
NonlinearOptimizationParameters::newFactorization(bool useQR) {
  shared_ptr ptr(boost::make_shared<NonlinearOptimizationParameters>());
  ptr->useQR_ = useQR;
  return ptr;
}

} // \namespace gtsam

@ -1,138 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file NonlinearOptimizationParameters.h
 * @date Oct 14, 2010
 * @author Kai Ni
 * @brief structure to store parameters for nonlinear optimization
 */

#pragma once

#include <string>
#include <boost/shared_ptr.hpp>
#include <boost/serialization/nvp.hpp>

namespace gtsam {

/**
 * a container for all related parameters
 * \nosubgrouping
 */
struct NonlinearOptimizationParameters {

  typedef boost::shared_ptr<NonlinearOptimizationParameters> shared_ptr;
  typedef NonlinearOptimizationParameters This;

  double absDecrease_; ///< threshold for the absolute decrease per iteration

  /**
   * Relative decrease threshold, where relative error = (new-current)/current.
   * This can be set to 0 if there is a possibility for negative error values.
   */
  double relDecrease_; ///< threshold for the relative decrease per iteration

  double sumError_; ///< threshold for the sum of error
  size_t maxIterations_; ///< maximum number of iterations
  double lambda_; ///< starting lambda value
  double lambdaFactor_; ///< factor by which lambda is multiplied

  /// verbosity levels
  typedef enum {
    SILENT,
    ERROR,
    LAMBDA,
    TRYLAMBDA,
    VALUES,
    DELTA,
    TRYCONFIG,
    TRYDELTA,
    LINEAR,
    DAMPED
  } verbosityLevel;
  verbosityLevel verbosity_; ///< verbosity

  /// trust-region method mode
  typedef enum {
    FAST, BOUNDED, CAUTIOUS
  } LambdaMode;
  LambdaMode lambdaMode_; ///< trust-region method mode

  /// if true, solve whole system with QR, otherwise use LDL when possible
  bool useQR_;

  /// @name Standard Constructors
  /// @{

  /// Default constructor
  NonlinearOptimizationParameters();

  /// Constructor from doubles
  NonlinearOptimizationParameters(double absDecrease, double relDecrease,
      double sumError, int iIters = 100, double lambda = 1e-5,
      double lambdaFactor = 10, verbosityLevel v = SILENT,
      LambdaMode lambdaMode = BOUNDED, bool useQR = false);

  /// Copy constructor
  NonlinearOptimizationParameters(
      const NonlinearOptimizationParameters &parameters);

  /// @}
  /// @name Standard Interface
  /// @{

  /// print
  void print(const std::string& s="") const;

  /// a copy of this instance except for a new lambda
  shared_ptr newLambda_(double lambda) const;

  /// @}
  /// @name Advanced Interface
  /// @{

  /// a copy of the default instance except for a new verbosity
  static shared_ptr newVerbosity(verbosityLevel verbosity);

  /// a copy of the default instance except for a new maxIterations
  static shared_ptr newMaxIterations(int maxIterations);

  /// a copy of the default instance except for a new lambda
  static shared_ptr newLambda(double lambda);

  /// a copy of the default instance except for new thresholds
  static shared_ptr newDecreaseThresholds(double absDecrease,
      double relDecrease);

  /// a copy of the default instance except for a new QR flag
  static shared_ptr newFactorization(bool useQR);

  /// @}

private:
  /** Serialization function */
  friend class boost::serialization::access;
  template<class ARCHIVE>
  void serialize(ARCHIVE & ar, const unsigned int version)
  {
    ar & BOOST_SERIALIZATION_NVP(absDecrease_);
    ar & BOOST_SERIALIZATION_NVP(relDecrease_);
    ar & BOOST_SERIALIZATION_NVP(sumError_);
    ar & BOOST_SERIALIZATION_NVP(maxIterations_);
    ar & BOOST_SERIALIZATION_NVP(lambda_);
    ar & BOOST_SERIALIZATION_NVP(lambdaFactor_);
    ar & BOOST_SERIALIZATION_NVP(verbosity_);
    ar & BOOST_SERIALIZATION_NVP(lambdaMode_);
    ar & BOOST_SERIALIZATION_NVP(useQR_);
  }
};
}
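
A sketch of how these removed factory helpers composed; names are as declared above, the values are hypothetical.

// Sketch only: each static helper returns a default-constructed parameters
// object with one field overridden.
NonlinearOptimizationParameters::shared_ptr params =
    NonlinearOptimizationParameters::newMaxIterations(50);
params->print("custom parameters");

// The non-static newLambda_ copies an existing instance instead:
NonlinearOptimizationParameters::shared_ptr damped = params->newLambda_(1e-3);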

@ -1,356 +0,0 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file NonlinearOptimizer-inl.h
 * This is a template definition file, include it where needed (only!)
 * so that the appropriate code is generated and link errors avoided.
 * @brief: Encapsulates nonlinear optimization state
 * @author Frank Dellaert
 * @date Sep 7, 2009
 */

#pragma once

#include <iostream>
#include <boost/tuple/tuple.hpp>
#include <gtsam/base/cholesky.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>

using namespace std;

namespace gtsam {

/* ************************************************************************* */
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W>::NonlinearOptimizer(shared_graph graph,
    shared_values values, shared_ordering ordering, shared_parameters parameters) :
    graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering),
    parameters_(parameters), iterations_(0),
    dimensions_(new vector<size_t>(values->dims(*ordering))),
    structure_(new VariableIndex(*graph->symbolic(*ordering))) {
  if (!graph) throw std::invalid_argument(
      "NonlinearOptimizer constructor: graph = NULL");
  if (!values) throw std::invalid_argument(
      "NonlinearOptimizer constructor: values = NULL");
  if (!ordering) throw std::invalid_argument(
      "NonlinearOptimizer constructor: ordering = NULL");
}

/* ************************************************************************* */
// FIXME: remove this constructor
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W>::NonlinearOptimizer(shared_graph graph,
    shared_values values, shared_ordering ordering,
    shared_solver spcg_solver, shared_parameters parameters) :
    graph_(graph), values_(values), error_(graph->error(*values)), ordering_(ordering),
    parameters_(parameters), iterations_(0),
    dimensions_(new vector<size_t>(values->dims(*ordering))),
    spcg_solver_(spcg_solver) {
  if (!graph) throw std::invalid_argument(
      "NonlinearOptimizer constructor: graph = NULL");
  if (!values) throw std::invalid_argument(
      "NonlinearOptimizer constructor: values = NULL");
  if (!spcg_solver) throw std::invalid_argument(
      "NonlinearOptimizer constructor: spcg_solver = NULL");
}

/* ************************************************************************* */
// One iteration of Gauss Newton
/* ************************************************************************* */
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::iterate() const {

  Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;

  // FIXME: get rid of spcg solver
  shared_solver solver;
  if (spcg_solver_) { // special case for SPCG
    spcg_solver_->replaceFactors(linearize());
    solver = spcg_solver_;
  } else { // normal case
    solver = createSolver();
  }

  VectorValues delta = *solver->optimize();

  // maybe show output
  if (verbosity >= Parameters::DELTA) delta.print("delta");

  // take old values and update it
  shared_values newValues(new Values(values_->retract(delta, *ordering_)));

  // maybe show output
  if (verbosity >= Parameters::VALUES) newValues->print("newValues");

  NonlinearOptimizer newOptimizer = newValues_(newValues);
  ++ newOptimizer.iterations_;

  if (verbosity >= Parameters::ERROR) cout << "error: " << newOptimizer.error_ << endl;
  return newOptimizer;
}

/* ************************************************************************* */
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::gaussNewton() const {
  static W writer(error_);

  if (error_ < parameters_->sumError_ ) {
    if ( parameters_->verbosity_ >= Parameters::ERROR)
      cout << "Exiting, as error = " << error_
           << " < sumError (" << parameters_->sumError_ << ")" << endl;
    return *this;
  }

  // linearize, solve, update
  NonlinearOptimizer next = iterate();

  writer.write(next.error_);

  // check convergence
  bool converged = gtsam::check_convergence(*parameters_, error_, next.error_);

  // return converged state or iterate
  if (converged) return next;
  else return next.gaussNewton();
}

/* ************************************************************************* */
// Iteratively try to do tempered Gauss-Newton steps until we succeed.
// Form damped system with given lambda, and return a new, more optimistic
// optimizer if error decreased or iterate with a larger lambda if not.
// TODO: in theory we can't infinitely recurse, but maybe we should put a max.
// Reminder: the parameters are Graph type $G$, Values class type $T$,
// linear system class $L$, the nonlinear solver type $S$, and the writer type $W$
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::try_lambda(const L& linearSystem) {

  const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
  const Parameters::LambdaMode lambdaMode = parameters_->lambdaMode_ ;
  const double factor = parameters_->lambdaFactor_ ;
  double lambda = parameters_->lambda_ ;

  if( lambdaMode >= Parameters::CAUTIOUS) throw runtime_error("CAUTIOUS mode not working yet, please use BOUNDED.");

  double next_error = error_;
  shared_values next_values = values_;

  // Keep increasing lambda until we make progress
  while(true) {
    if (verbosity >= Parameters::TRYLAMBDA) cout << "trying lambda = " << lambda << endl;

    // add prior-factors
    // TODO: replace this dampening with a backsubstitution approach
    typename L::shared_ptr dampedSystem(new L(linearSystem));
    {
      double sigma = 1.0 / sqrt(lambda);
      dampedSystem->reserve(dampedSystem->size() + dimensions_->size());
      // for each of the variables, add a prior
      for(Index j=0; j<dimensions_->size(); ++j) {
        size_t dim = (*dimensions_)[j];
        Matrix A = eye(dim);
        Vector b = zero(dim);
        SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
        typename L::sharedFactor prior(new JacobianFactor(j, A, b, model));
        dampedSystem->push_back(prior);
      }
    }
    if (verbosity >= Parameters::DAMPED) dampedSystem->print("damped");

    // Create a new solver using the damped linear system
    // FIXME: remove spcg specific code
    if (spcg_solver_) spcg_solver_->replaceFactors(dampedSystem);
    shared_solver solver = (spcg_solver_) ? spcg_solver_ : shared_solver(
        new S(dampedSystem, structure_, parameters_->useQR_));

    // Try solving
    try {
      VectorValues delta = *solver->optimize();
      if (verbosity >= Parameters::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl;
      if (verbosity >= Parameters::TRYDELTA) delta.print("delta");

      // update values
      shared_values newValues(new Values(values_->retract(delta, *ordering_)));

      // create new optimization state with more adventurous lambda
      double error = graph_->error(*newValues);

      if (verbosity >= Parameters::TRYLAMBDA) cout << "next error = " << error << endl;

      if( error <= error_ ) {
        next_values = newValues;
        next_error = error;
        lambda /= factor;
        break;
      }
      else {
        // Either we're not cautious, or the same lambda was worse than the current error.
        // The more adventurous lambda was worse too, so make lambda more conservative
        // and keep the same values.
        if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
          if(verbosity >= Parameters::ERROR)
            cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
          break;
        } else {
          lambda *= factor;
        }
      }
    } catch(const NegativeMatrixException& e) {
      if(verbosity >= Parameters::LAMBDA)
        cout << "Negative matrix, increasing lambda" << endl;
      // Either we're not cautious, or the same lambda was worse than the current error.
      // The more adventurous lambda was worse too, so make lambda more conservative
      // and keep the same values.
      if(lambdaMode >= Parameters::BOUNDED && lambda >= 1.0e5) {
        if(verbosity >= Parameters::ERROR)
          cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
        break;
      } else {
        lambda *= factor;
      }
    } catch(...) {
      throw;
    }
  } // end while

  return newValuesErrorLambda_(next_values, next_error, lambda);
}
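
Why the per-variable priors in try_lambda implement Levenberg damping, restated as a short check:

// Each damping prior is a JacobianFactor with A = I, b = 0, and isotropic
// sigma = 1/sqrt(lambda), so its information contribution is
// A' * Sigma^-1 * A = lambda * I. The damped system therefore solves
// (J'J + lambda*I) dx = -J'e, which is exactly Levenberg's update.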

/* ************************************************************************* */
// One iteration of Levenberg Marquardt
// Reminder: the parameters are Graph type $G$, Values class type $T$,
// linear system class $L$, the nonlinear solver type $S$, and the writer type $W$
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::iterateLM() {

  const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
  const double lambda = parameters_->lambda_ ;

  // show output
  if (verbosity >= Parameters::VALUES) values_->print("values");
  if (verbosity >= Parameters::ERROR) cout << "error: " << error_ << endl;
  if (verbosity >= Parameters::LAMBDA) cout << "lambda = " << lambda << endl;

  // linearize all factors once
  boost::shared_ptr<L> linear(new L(*graph_->linearize(*values_, *ordering_)));

  if (verbosity >= Parameters::LINEAR) linear->print("linear");

  // try lambda steps with successively larger lambda until we achieve descent
  if (verbosity >= Parameters::LAMBDA) cout << "Trying Lambda for the first time" << endl;

  return try_lambda(*linear);
}

/* ************************************************************************* */
// Reminder: the parameters are Graph type $G$, Values class type $T$,
// linear system class $L$, the nonlinear solver type $S$, and the writer type $W$
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::levenbergMarquardt() {

  // Initialize
  bool converged = false;
  const Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;

  // check if we're already close enough
  if (error_ < parameters_->sumError_) {
    if ( verbosity >= Parameters::ERROR )
      cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl;
    return *this;
  }

  // for the case that maxIterations_ = 0
  iterations_ = 1;
  if (iterations_ >= parameters_->maxIterations_)
    return *this;

  // Iterative loop that implements Levenberg-Marquardt
  while (true) {
    double previous_error = error_;
    // do one iteration of LM
    NonlinearOptimizer next = iterateLM();
    error_ = next.error_;
    values_ = next.values_;
    parameters_ = next.parameters_;
    iterations_ = next.iterations_;

    // check convergence
    // TODO: move convergence checks here and incorporate in verbosity levels
    // TODO: build into iterations somehow as an instance variable
    converged = gtsam::check_convergence(*parameters_, previous_error, error_);

    if(iterations_ >= parameters_->maxIterations_ || converged == true) {
      if (verbosity >= Parameters::VALUES) values_->print("final values");
      if (verbosity >= Parameters::ERROR && iterations_ >= parameters_->maxIterations_) cout << "Terminating because reached maximum iterations" << endl;
      if (verbosity >= Parameters::ERROR) cout << "final error: " << error_ << endl;
      if (verbosity >= Parameters::LAMBDA) cout << "final lambda = " << lambda() << endl;
      return *this;
    }
    iterations_++;
  }
}

/* ************************************************************************* */
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::iterateDogLeg() {

  S solver(*graph_->linearize(*values_, *ordering_), parameters_->useQR_);
  DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate(
      parameters_->lambda_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *solver.eliminate(),
      *graph_, *values_, *ordering_, error_, parameters_->verbosity_ > Parameters::ERROR);
  shared_values newValues(new Values(values_->retract(result.dx_d, *ordering_)));
  return newValuesErrorLambda_(newValues, result.f_error, result.Delta);
}

/* ************************************************************************* */
template<class G, class L, class S, class W>
NonlinearOptimizer<G, L, S, W> NonlinearOptimizer<G, L, S, W>::dogLeg() {
  static W writer(error_);

  // check if we're already close enough
  if (error_ < parameters_->sumError_) {
    if ( parameters_->verbosity_ >= Parameters::ERROR )
      cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl;
    return *this;
  }

  // for the case that maxIterations_ = 0
  iterations_ = 1;
  if (iterations_ >= parameters_->maxIterations_)
    return *this;

  // Iterative loop that runs Dog Leg
  while (true) {
    double previous_error = error_;
    // do one iteration of Dog Leg
    NonlinearOptimizer next = iterateDogLeg();
    writer.write(next.error_);
    error_ = next.error_;
    values_ = next.values_;
    parameters_ = next.parameters_;

    // check convergence
    // TODO: move convergence checks here and incorporate in verbosity levels
    // TODO: build into iterations somehow as an instance variable
    bool converged = gtsam::check_convergence(*parameters_, previous_error, error_);

    if(iterations_ >= parameters_->maxIterations_ || converged == true) {
      if (parameters_->verbosity_ >= Parameters::VALUES) values_->print("final values");
      if (parameters_->verbosity_ >= Parameters::ERROR) cout << "final error: " << error_ << endl;
      if (parameters_->verbosity_ >= Parameters::LAMBDA) cout << "final Delta (called lambda) = " << lambda() << endl;
      return *this;
    }
    iterations_++;
  }

}

}

@ -24,22 +24,50 @@ using namespace std;

namespace gtsam {

/* ************************************************************************* */
void NonlinearOptimizer::defaultOptimize() {

bool check_convergence (
    const NonlinearOptimizationParameters &parameters,
    double currentError, double newError) {
  return check_convergence(parameters.relDecrease_,
      parameters.absDecrease_,
      parameters.sumError_,
      currentError, newError,
      parameters.verbosity_) ;
  const NonlinearOptimizerParams& params = this->_params();
  double currentError = this->error();

  // check if we're already close enough
  if(currentError <= params.errorTol) {
    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
      cout << "Exiting, as error = " << currentError << " < " << params.errorTol << endl;
    return;
  }

  // Maybe show output
  if (params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("Initial values");
  if (params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "Initial error: " << currentError << endl;

  // Return if we already have too many iterations
  if(this->iterations() >= params.maxIterations)
    return;

  // Iterative loop
  do {
    // Do next iteration
    currentError = this->error();
    this->iterate();

    // Maybe show output
    if(params.verbosity >= NonlinearOptimizerParams::VALUES) this->values().print("newValues");
    if(params.verbosity >= NonlinearOptimizerParams::ERROR) cout << "newError: " << this->error() << endl;
  } while(this->iterations() < params.maxIterations &&
      !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
          params.errorTol, currentError, this->error(), params.verbosity));

  // Printing if verbose
  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
      this->iterations() >= params.maxIterations)
    cout << "Terminating because reached maximum iterations" << endl;
}

bool check_convergence(
    double relativeErrorTreshold,
    double absoluteErrorTreshold,
    double errorThreshold,
    double currentError, double newError, int verbosity) {
/* ************************************************************************* */
bool checkConvergence(double relativeErrorTreshold, double absoluteErrorTreshold,
    double errorThreshold, double currentError, double newError,
    NonlinearOptimizerParams::Verbosity verbosity) {

  if ( verbosity >= 2 ) {
    if ( newError <= errorThreshold )

@ -53,7 +81,7 @@ bool check_convergence(
  // check if diverges
  double absoluteDecrease = currentError - newError;
  if (verbosity >= 2) {
    if (absoluteDecrease < absoluteErrorTreshold)
    if (absoluteDecrease <= absoluteErrorTreshold)
      cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " < " << absoluteErrorTreshold << endl;
    else
      cout << "absoluteDecrease: " << setprecision(12) << absoluteDecrease << " >= " << absoluteErrorTreshold << endl;

@ -62,13 +90,13 @@ bool check_convergence(
  // calculate relative error decrease and update currentError
  double relativeDecrease = absoluteDecrease / currentError;
  if (verbosity >= 2) {
    if (relativeDecrease < relativeErrorTreshold)
    if (relativeDecrease <= relativeErrorTreshold)
      cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " < " << relativeErrorTreshold << endl;
    else
      cout << "relativeDecrease: " << setprecision(12) << relativeDecrease << " >= " << relativeErrorTreshold << endl;
  }
  bool converged = (relativeErrorTreshold && (relativeDecrease < relativeErrorTreshold))
      || (absoluteDecrease < absoluteErrorTreshold);
  bool converged = (relativeErrorTreshold && (relativeDecrease <= relativeErrorTreshold))
      || (absoluteDecrease <= absoluteErrorTreshold);
  if (verbosity >= 1 && converged) {
    if(absoluteDecrease >= 0.0)
      cout << "converged" << endl;
@ -11,449 +11,229 @@

/**
 * @file NonlinearOptimizer.h
 * @brief Encapsulates nonlinear optimization state
 * @author Frank Dellaert
 * @brief Base class and parameters for nonlinear optimization algorithms
 * @author Richard Roberts
 * @date Sep 7, 2009
 */

#pragma once

#include <gtsam/linear/GaussianMultifrontalSolver.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizationParameters.h>

namespace gtsam {

class NullOptimizerWriter {
class NonlinearOptimizer;

/** The common parameters for Nonlinear optimizers. Most optimizers
 * deriving from NonlinearOptimizer also subclass the parameters.
 */
class NonlinearOptimizerParams {
public:
  NullOptimizerWriter(double error) {} /// Constructor
  virtual ~NullOptimizerWriter() {}
  virtual void write(double error) {} /// Capture the error value after an optimization step
};
  /** See NonlinearOptimizerParams::verbosity */
  enum Verbosity {
    SILENT,
    ERROR,
    VALUES,
    DELTA,
    LINEAR
  };

  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)

  NonlinearOptimizerParams() :
    maxIterations(100), relativeErrorTol(1e-5), absoluteErrorTol(1e-5),
    errorTol(0.0), verbosity(SILENT) {}

  virtual void print(const std::string& str = "") const {
    std::cout << str << "\n";
    std::cout << "relative decrease threshold: " << relativeErrorTol << "\n";
    std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
    std::cout << "      total error threshold: " << errorTol << "\n";
    std::cout << "         maximum iterations: " << maxIterations << "\n";
    std::cout << "            verbosity level: " << verbosity << std::endl;
  }

  virtual ~NonlinearOptimizerParams() {}
};
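
For contrast with the removed parameters class, a minimal sketch of configuring the new NonlinearOptimizerParams; the fields are as declared above, the values are hypothetical.

NonlinearOptimizerParams params;
params.maxIterations = 50;                          // stop after 50 iterations
params.relativeErrorTol = 1e-6;                     // tighter relative-decrease threshold
params.verbosity = NonlinearOptimizerParams::ERROR; // print error each iteration
params.print("optimizer parameters");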
|
||||
|
||||
|
||||
/**
|
||||
* The class NonlinearOptimizer encapsulates an optimization state.
|
||||
* Typically it is instantiated with a NonlinearFactorGraph and initial values
|
||||
* and then one of the optimization routines is called. These iterate
|
||||
* until convergence. All methods are functional and return a new state.
|
||||
*
|
||||
* The class is parameterized by the Graph type $G$, Values class type $Values$,
|
||||
* linear system class $L$, the non linear solver type $S$, and the writer type $W$
|
||||
*
|
||||
* The values class type $Values$ is in order to be able to optimize over non-vector values structures.
|
||||
*
|
||||
* A nonlinear system solver $S$
|
||||
* Concept NonLinearSolver<G,Values,L> implements
|
||||
* linearize: G * Values -> L
|
||||
* solve : L -> Values
|
||||
*
|
||||
* The writer $W$ generates output to disk or the screen.
|
||||
*
|
||||
* For example, in a 2D case, $G$ can be pose2SLAM::Graph, $Values$ can be Pose2Values,
|
||||
* $L$ can be GaussianFactorGraph and $S$ can be Factorization<pose2SLAM::Graph, Pose2Values>.
|
||||
* The solver class has two main functions: linearize and optimize. The first one
|
||||
* linearizes the nonlinear cost function around the current estimate, and the second
|
||||
* one optimizes the linearized system using various methods.
|
||||
*
|
||||
* To use the optimizer in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
|
||||
* \nosubgrouping
|
||||
* Base class for a nonlinear optimization state, including the current estimate
|
||||
* of the variable values, error, and number of iterations. Optimizers derived
|
||||
* from NonlinearOptimizer usually also define a derived state class containing
|
||||
* additional state specific to the algorithm (for example, Dogleg state
|
||||
* contains the current trust region radius).
|
||||
*/
|
||||
template<class G, class L = GaussianFactorGraph, class GS = GaussianMultifrontalSolver, class W = NullOptimizerWriter>
|
||||
class NonlinearOptimizer {
|
||||
class NonlinearOptimizerState {
|
||||
public:
|
||||
|
||||
// For performance reasons in recursion, we store values in a shared_ptr
|
||||
typedef boost::shared_ptr<const Values> shared_values; ///Prevent memory leaks in Values
|
||||
typedef boost::shared_ptr<const G> shared_graph; /// Prevent memory leaks in Graph
|
||||
typedef boost::shared_ptr<L> shared_linear; /// Not sure
|
||||
typedef boost::shared_ptr<const Ordering> shared_ordering; ///ordering parameters
|
||||
typedef boost::shared_ptr<GS> shared_solver; /// Solver
|
||||
typedef NonlinearOptimizationParameters Parameters; ///These take the parameters defined in NonLinearOptimizationParameters.h
|
||||
typedef boost::shared_ptr<const Parameters> shared_parameters ; ///
|
||||
typedef boost::shared_ptr<VariableIndex> shared_structure; // TODO: make this const
|
||||
/** The current estimate of the variable values. */
|
||||
Values values;
|
||||
|
||||
private:
|
||||
/** The factor graph error on the current values. */
|
||||
double error;
|
||||
|
||||
typedef NonlinearOptimizer<G, L, GS> This;
|
||||
typedef boost::shared_ptr<const std::vector<size_t> > shared_dimensions;
|
||||
/** The number of optimization iterations performed. */
|
||||
unsigned int iterations;
|
||||
|
||||
/// keep a reference to const version of the graph
|
||||
/// These normally do not change
|
||||
const shared_graph graph_;
|
||||
NonlinearOptimizerState() {}
|
||||
|
||||
// keep a values structure and its error
|
||||
// These typically change once per iteration (in a functional way)
|
||||
shared_values values_;
|
||||
/** Virtual destructor */
|
||||
virtual ~NonlinearOptimizerState() {}
|
||||
|
||||
// current error for this state
|
||||
double error_;
|
||||
protected:
|
||||
NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
|
||||
values(values), error(graph.error(values)), iterations(iterations) {}
|
||||
|
||||
// the variable ordering
|
||||
const shared_ordering ordering_;
|
||||
NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) :
|
||||
values(values), error(error), iterations(iterations) {}
|
||||
|
||||
// storage for parameters, lambda, thresholds, etc.
|
||||
shared_parameters parameters_;
|
||||
|
||||
// for performance track
|
||||
size_t iterations_;
|
||||
|
||||
// The dimensions of each linearized variable
|
||||
const shared_dimensions dimensions_;
|
||||
|
||||
// storage of structural components that don't change between iterations
|
||||
// used at creation of solvers in each iteration
|
||||
// TODO: make this structure component specific to solver type
|
||||
const shared_structure structure_;
|
||||
|
||||
// solver used only for SPCG
|
||||
// FIXME: remove this!
|
||||
shared_solver spcg_solver_;
|
||||
|
||||
/// @name Advanced Constructors
|
||||
/// @{
|
||||
|
||||
/**
|
||||
* Constructor that does not do any computation
|
||||
*/
|
||||
NonlinearOptimizer(shared_graph graph, shared_values values, const double error,
|
||||
shared_ordering ordering, shared_parameters parameters, shared_dimensions dimensions,
|
||||
size_t iterations, shared_structure structure)
|
||||
: graph_(graph), values_(values), error_(error), ordering_(ordering),
|
||||
parameters_(parameters), iterations_(iterations), dimensions_(dimensions),
|
||||
structure_(structure) {}
|
||||
|
||||
/** constructors to replace specific components */
|
||||
|
||||
///TODO: comment
|
||||
This newValues_(shared_values newValues) const {
|
||||
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues),
|
||||
ordering_, parameters_, dimensions_, iterations_, structure_); }
|
||||
|
||||
///TODO: comment
|
||||
This newValuesErrorLambda_(shared_values newValues, double newError, double newLambda) const {
|
||||
return NonlinearOptimizer(graph_, newValues, newError, ordering_,
|
||||
parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); }
|
||||
|
||||
///TODO: comment
|
||||
This newIterations_(int iterations) const {
|
||||
return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters_, dimensions_,
|
||||
iterations, structure_); }
|
||||
|
||||
///TODO: comment
|
||||
This newLambda_(double newLambda) const {
|
||||
return NonlinearOptimizer(graph_, values_, error_, ordering_,
|
||||
parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); }
|
||||
|
||||
///TODO: comment
|
||||
This newValuesLambda_(shared_values newValues, double newLambda) const {
|
||||
return NonlinearOptimizer(graph_, newValues, graph_->error(*newValues),
|
||||
ordering_, parameters_->newLambda_(newLambda), dimensions_, iterations_, structure_); }
|
||||
|
||||
///TODO: comment
|
||||
This newParameters_(shared_parameters parameters) const {
|
||||
return NonlinearOptimizer(graph_, values_, error_, ordering_, parameters, dimensions_,
|
||||
iterations_, structure_); }
|
||||
|
||||
/// @}
|
||||
|
||||
public:
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/**
|
||||
* Constructor that evaluates new error
|
||||
*/
|
||||
NonlinearOptimizer(shared_graph graph,
|
||||
shared_values values,
|
||||
shared_ordering ordering,
|
||||
shared_parameters parameters = boost::make_shared<Parameters>());
|
||||
|
||||
/**
|
||||
* Constructor that takes a solver directly - only useful for SPCG
|
||||
* FIXME: REMOVE THIS FUNCTION!
|
||||
*/
|
||||
NonlinearOptimizer(shared_graph graph,
|
||||
shared_values values,
|
||||
shared_ordering ordering,
|
||||
shared_solver spcg_solver,
|
||||
shared_parameters parameters = boost::make_shared<Parameters>());
|
||||
|
||||
/**
|
||||
* Copy constructor
|
||||
*/
|
||||
NonlinearOptimizer(const NonlinearOptimizer<G, L, GS> &optimizer) :
|
||||
graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
|
||||
ordering_(optimizer.ordering_), parameters_(optimizer.parameters_),
|
||||
iterations_(optimizer.iterations_), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {}
|
||||
|
||||
// access to member variables
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/// print
|
||||
void print(const std::string& s="") const {
|
||||
cout << "NonlinearOptimizer " << s << endl;
|
||||
cout << " current error: " << error_ << endl;
|
||||
cout << "iterations count: " << iterations_ << endl;
|
||||
}
|
||||
|
||||
/** Return current error */
|
||||
double error() const { return error_; }
|
||||
|
||||
/** Return current lambda */
|
||||
double lambda() const { return parameters_->lambda_; }
|
||||
|
||||
/** Return the values */
|
||||
shared_values values() const { return values_; }
|
||||
|
||||
/** Return the graph */
|
||||
shared_graph graph() const { return graph_; }
|
||||
|
||||
/** Return the itertions */
|
||||
size_t iterations() const { return iterations_; }
|
||||
|
||||
/** Return the ordering */
|
||||
shared_ordering ordering() const { return ordering_; }
|
||||
|
||||
/** Return the parameters */
|
||||
shared_parameters parameters() const { return parameters_; }
|
||||
|
||||
/** Return the structure variable (variable index) */
|
||||
shared_structure structure() const { return structure_; }
|
||||
|
||||
/**
|
||||
* Return a linearized graph at the current graph/values/ordering
|
||||
*/
|
||||
shared_linear linearize() const {
|
||||
return shared_linear(new L(*graph_->linearize(*values_, *ordering_)));
|
||||
}
|
||||
|
||||
/**
|
||||
* create a solver around the current graph/values
|
||||
* NOTE: this will actually solve a linear system
|
||||
*/
|
||||
shared_solver createSolver() const {
|
||||
return shared_solver(new GS(linearize(), structure_, parameters_->useQR_));
|
||||
}
|
||||
|
||||
/**
|
||||
* Return mean and covariance on a single variable
|
||||
*/
|
||||
Matrix marginalCovariance(Key j) const {
|
||||
return createSolver()->marginalCovariance((*ordering_)[j]);
|
||||
}
|
||||
|
||||
/**
|
||||
* linearize and optimize
|
||||
* This returns an VectorValues, i.e., vectors in tangent space of Values
|
||||
*/
|
||||
VectorValues linearizeAndOptimizeForDelta() const {
|
||||
return *createSolver()->optimize();
|
||||
}

    /**
     * Do one Gauss-Newton iteration and return next state
     * suggested interface
     */
    NonlinearOptimizer iterate() const;

    ///
    /// Optimize a solution for a nonlinear factor graph
    /// @param relativeThreshold
    /// @param absoluteThreshold
    /// @param verbosity Integer specifying how much output to provide
    ///

    // suggested interface
    NonlinearOptimizer gaussNewton() const;

    /// @}
    /// @name Advanced Interface
    /// @{

    /** Recursively try to do tempered Gauss-Newton steps until we succeed */
    NonlinearOptimizer try_lambda(const L& linear);

    /**
     * One iteration of Levenberg-Marquardt
     */
    NonlinearOptimizer iterateLM();

    ///
    /// Optimize using Levenberg-Marquardt. Really Levenberg's
    /// algorithm at this moment, as we just add I*\lambda to the Hessian
    /// H'H. The probabilistic explanation is very simple: every
    /// variable gets an extra Gaussian prior that biases it towards staying at its
    /// current value, with variance 1/lambda. This is done very easily
    /// (but perhaps wastefully) by adding a prior factor for each of
    /// the variables, after linearization.
    ///
    /// @param relativeThreshold
    /// @param absoluteThreshold
    /// @param verbosity Integer specifying how much output to provide
    /// @param lambdaFactor Factor by which to decrease/increase lambda
    ///
    NonlinearOptimizer levenbergMarquardt();
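
    // Editorial note: in equation form, the prior-factor trick described above
    // solves the damped normal equations at each linearization point
    //
    //   (J^T J + \lambda I) \delta = -J^T r
    //
    // (J = Jacobian, r = residual); the extra prior on each variable block
    // \delta_j is a zero-mean Gaussian with precision \lambda, i.e. variance
    // 1/\lambda, exactly as the comment above states.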

    /**
     * One iteration of the dog leg algorithm
     */
    NonlinearOptimizer iterateDogLeg();

    /**
     * Optimize using the Dog Leg algorithm
     */
    NonlinearOptimizer dogLeg();

    // static interfaces to LM, Dog leg, and GN optimization techniques

    /// Static interface to LM optimization using default ordering
    /// @param graph Nonlinear factor graph to optimize
    /// @param values Initial values
    /// @param parameters Optimization parameters
    /// @return an optimized values structure
    static shared_values optimizeLM(shared_graph graph,
        shared_values values,
        shared_parameters parameters = boost::make_shared<Parameters>()) {

      // Use a variable ordering from COLAMD
      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
      // initial optimization state is the same in both cases tested
      //GS solver(*graph->linearize(*values, *ordering));

      NonlinearOptimizer optimizer(graph, values, ordering, parameters);
      NonlinearOptimizer result = optimizer.levenbergMarquardt();
      return result.values();
    }

    /// Static interface to LM optimization that takes a verbosity level - see above
    static shared_values optimizeLM(shared_graph graph,
        shared_values values,
        Parameters::verbosityLevel verbosity) {
      return optimizeLM(graph, values, Parameters::newVerbosity(verbosity));
    }

    /**
     * Static interface to LM optimization (no shared_ptr arguments) - see above
     */
    static shared_values optimizeLM(const G& graph,
        const Values& values,
        const Parameters parameters = Parameters()) {
      return optimizeLM(boost::make_shared<const G>(graph),
          boost::make_shared<const Values>(values),
          boost::make_shared<Parameters>(parameters));
    }

    /// Static interface to LM optimization with a verbosity level (no shared_ptr arguments) - see above
    static shared_values optimizeLM(const G& graph,
        const Values& values,
        Parameters::verbosityLevel verbosity) {
      return optimizeLM(boost::make_shared<const G>(graph),
          boost::make_shared<const Values>(values),
          verbosity);
    }

    /// Static interface to Dog leg optimization using default ordering
    /// @param graph Nonlinear factor graph to optimize
    /// @param values Initial values
    /// @param parameters Optimization parameters
    /// @return an optimized values structure
    static shared_values optimizeDogLeg(shared_graph graph,
        shared_values values,
        shared_parameters parameters = boost::make_shared<Parameters>()) {

      // Use a variable ordering from COLAMD
      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
      // initial optimization state is the same in both cases tested
      //GS solver(*graph->linearize(*values, *ordering));

      NonlinearOptimizer optimizer(graph, values, ordering, parameters);
      NonlinearOptimizer result = optimizer.dogLeg();
      return result.values();
    }

    ///
    /// Static interface to Dog leg optimization using default ordering and thresholds
    /// @param graph Nonlinear factor graph to optimize
    /// @param values Initial values
    /// @param verbosity Integer specifying how much output to provide
    /// @return an optimized values structure
    ///
    static shared_values optimizeDogLeg(shared_graph graph,
        shared_values values,
        Parameters::verbosityLevel verbosity) {
      return optimizeDogLeg(graph, values, Parameters::newVerbosity(verbosity)->newLambda_(1.0));
    }

    /**
     * Static interface to Dogleg optimization (no shared_ptr arguments) - see above
     */
    static shared_values optimizeDogLeg(const G& graph,
        const Values& values,
        const Parameters parameters = Parameters()) {
      return optimizeDogLeg(boost::make_shared<const G>(graph),
          boost::make_shared<const Values>(values),
          boost::make_shared<Parameters>(parameters));
    }

    /// Static interface to Dog leg optimization with a verbosity level (no shared_ptr arguments) - see above
    static shared_values optimizeDogLeg(const G& graph,
        const Values& values,
        Parameters::verbosityLevel verbosity) {
      return optimizeDogLeg(boost::make_shared<const G>(graph),
          boost::make_shared<const Values>(values),
          verbosity);
    }

    ///
    /// Static interface to GN optimization using default ordering and thresholds
    /// @param graph Nonlinear factor graph to optimize
    /// @param values Initial values
    /// @param parameters Optimization parameters
    /// @return an optimized values structure
    ///
    static shared_values optimizeGN(shared_graph graph,
        shared_values values,
        shared_parameters parameters = boost::make_shared<Parameters>()) {

      Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
      // initial optimization state is the same in both cases tested
      GS solver(*graph->linearize(*values, *ordering));

      NonlinearOptimizer optimizer(graph, values, ordering, parameters);
      NonlinearOptimizer result = optimizer.gaussNewton();
      return result.values();
    }

    /**
     * Static interface to GN optimization (no shared_ptr arguments) - see above
     */
    static shared_values optimizeGN(const G& graph, const Values& values, const Parameters parameters = Parameters()) {
      return optimizeGN(boost::make_shared<const G>(graph),
          boost::make_shared<const Values>(values),
          boost::make_shared<Parameters>(parameters));
    }

    friend class NonlinearOptimizer;
  };

  /**
   * Check convergence
   */
  bool check_convergence (
      double relativeErrorTreshold,
      double absoluteErrorTreshold,
      double errorThreshold,
      double currentError, double newError, int verbosity);

  /** Check convergence using the thresholds stored in a parameters object - see above */
  bool check_convergence (
      const NonlinearOptimizationParameters &parameters,
      double currentError, double newError);
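
  // Editorial sketch of the convergence rule these declarations implement (the
  // real definition lives in the corresponding .cpp; per the doc comment on
  // checkConvergence below, the thresholds are combined disjunctively):
  //
  //   bool check_convergence_sketch(double relTol, double absTol, double errTol,
  //       double currentError, double newError) {
  //     if (newError <= errTol) return true;               // error already small
  //     double absoluteDecrease = currentError - newError; // absolute decrease
  //     double relativeDecrease = absoluteDecrease / currentError;
  //     return relativeDecrease <= relTol || absoluteDecrease <= absTol;
  //   }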

/**
 * This is the abstract interface for classes that can optimize for the
 * maximum-likelihood estimate of a NonlinearFactorGraph.
 *
 * To use a class derived from this interface, construct the class with a
 * NonlinearFactorGraph and an initial Values variable assignment. Next, call the
 * optimize() method, which returns a new NonlinearOptimizer object containing
 * the optimized variable assignment. Call the values() method to retrieve the
 * optimized estimate. Alternatively, to take a shortcut, instead of calling
 * optimize(), call optimized(), which performs full optimization and returns
 * the resulting Values instead of the new optimizer.
 *
 * Note: This class is immutable; optimize() and iterate() return new
 * NonlinearOptimizer objects, so be sure to use the returned object and not
 * simply keep the unchanged original.
 *
 * Simple and compact example:
 * \code
 // One-liner to do full optimization and use the result.
 // Note use of "optimized()" to directly return Values, instead of "optimize()" that returns a new optimizer.
 Values::const_shared_ptr result = DoglegOptimizer(graph, initialValues).optimized();
 \endcode
 *
 * Example exposing more functionality and details:
 * \code
 // Create initial optimizer
 DoglegOptimizer initial(graph, initialValues);

 // Run full optimization until convergence.
 // Note use of "optimize()" to return a new optimizer, instead of "optimized()" that returns only the Values.
 // NonlinearOptimizer pointers are always returned, though they are actually a derived optimizer type.
 NonlinearOptimizer::auto_ptr final = initial->optimize();

 // The new optimizer has results and statistics
 cout << "Converged in " << final->iterations() << " iterations "
     "with final error " << final->error() << endl;

 // The values are a const_shared_ptr (boost::shared_ptr<const Values>)
 Values::const_shared_ptr result = final->values();

 // Use the results
 useTheResult(result);
 \endcode
 *
 * Example of setting parameters before optimization:
 * \code
 // Each derived optimizer type has its own parameters class, which inherits from NonlinearOptimizerParams
 DoglegParams params;
 params.factorization = DoglegParams::QR;
 params.relativeErrorTol = 1e-3;
 params.absoluteErrorTol = 1e-3;

 // Optimize
 Values::const_shared_ptr result = DoglegOptimizer(graph, initialValues, params).optimized();
 \endcode
 *
 * This interface also exposes an iterate() method, which performs one
 * iteration, returning a NonlinearOptimizer containing the adjusted variable
 * assignment. The optimize() method simply calls iterate() multiple times,
 * until the error changes by less than a threshold. We expose iterate() so that
 * you can easily control what happens between iterations, such as drawing or
 * printing, moving points from behind the camera to in front, etc.
 *
 * To modify the graph, values, or parameters between iterations, call the
 * update() functions, which preserve all other state (for example, the trust
 * region size in DoglegOptimizer). Derived optimizer classes also have
 * additional update methods, not in this abstract interface, for updating
 * algorithm-specific state.
 *
 * For more flexibility, since all functions are virtual, you may override them
 * in your own derived class.
 */
class NonlinearOptimizer {

public:

  /** A shared pointer to this class */
  typedef boost::shared_ptr<const NonlinearOptimizer> shared_ptr;

  /// @name Standard interface
  /// @{

  /** Optimize for the maximum-likelihood estimate, returning a new
   * NonlinearOptimizer class containing the optimized variable assignments,
   * which may be retrieved with values().
   *
   * This function simply calls iterate() in a loop, checking for convergence
   * with check_convergence(). For fine-grained control over the optimization
   * process, you may call iterate() and check_convergence() yourself, and if
   * needed modify the optimization state between iterations.
   */
  virtual const Values& optimize() { defaultOptimize(); return values(); }

  double error() const { return _state().error; }

  unsigned int iterations() const { return _state().iterations; }

  const Values& values() const { return _state().values; }

  /// @}

  /// @name Advanced interface
  /// @{

  /** Virtual destructor */
  virtual ~NonlinearOptimizer() {}

  /** Perform a single iteration, returning a new NonlinearOptimizer class
   * containing the updated variable assignments, which may be retrieved with
   * values().
   */
  virtual void iterate() = 0;

  /// @}

protected:

  NonlinearFactorGraph graph_;

  /** A default implementation of the optimization loop, which calls iterate()
   * until checkConvergence returns true.
   */
  void defaultOptimize();

  virtual const NonlinearOptimizerState& _state() const = 0;

  virtual const NonlinearOptimizerParams& _params() const = 0;

  /** Constructor for initial construction of base classes. */
  NonlinearOptimizer(const NonlinearFactorGraph& graph) : graph_(graph) {}

};
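
// Editorial sketch of a minimal derived optimizer, illustrating the pure
// virtual interface above (the name MyOptimizer and the member layout are
// hypothetical; real derived classes such as DoglegOptimizer differ):
//
//   class MyOptimizer : public NonlinearOptimizer {
//     NonlinearOptimizerState state_;
//     NonlinearOptimizerParams params_;
//   public:
//     MyOptimizer(const NonlinearFactorGraph& graph, const Values& initial)
//         : NonlinearOptimizer(graph) { /* initialize state_ from initial */ }
//     virtual void iterate() { /* compute a step, update state_.values and state_.error */ }
//   protected:
//     virtual const NonlinearOptimizerState& _state() const { return state_; }
//     virtual const NonlinearOptimizerParams& _params() const { return params_; }
//   };
//
// optimize() then runs the defaultOptimize() loop, calling iterate() until
// checkConvergence is satisfied.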

/** Check whether the relative error decrease is less than relativeErrorTreshold,
 * the absolute error decrease is less than absoluteErrorTreshold, <emph>or</emph>
 * the error itself is less than errorThreshold.
 */
bool checkConvergence(double relativeErrorTreshold,
    double absoluteErrorTreshold, double errorThreshold,
    double currentError, double newError, NonlinearOptimizerParams::Verbosity verbosity);

} // gtsam

/// @}

#include <gtsam/nonlinear/NonlinearOptimizer-inl.h>

@ -0,0 +1,87 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file SuccessiveLinearizationOptimizer.h
 * @brief Common parameters for the successive-linearization (GN/LM/Dogleg) optimizers
 * @author Richard Roberts
 * @date Apr 1, 2012
 */

#pragma once

#include <gtsam/nonlinear/NonlinearOptimizer.h>

namespace gtsam {

class SuccessiveLinearizationParams : public NonlinearOptimizerParams {
public:
  /** See SuccessiveLinearizationParams::elimination */
  enum Elimination {
    MULTIFRONTAL,
    SEQUENTIAL
  };

  /** See SuccessiveLinearizationParams::factorization */
  enum Factorization {
    CHOLESKY,
    LDL,
    QR
  };

  Elimination elimination; ///< The elimination algorithm to use (default: MULTIFRONTAL)
  Factorization factorization; ///< The numerical factorization (default: LDL)
  boost::optional<Ordering> ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty)

  SuccessiveLinearizationParams() :
    elimination(MULTIFRONTAL), factorization(LDL) {}

  virtual ~SuccessiveLinearizationParams() {}

  virtual void print(const std::string& str = "") const {
    NonlinearOptimizerParams::print(str);
    if(elimination == MULTIFRONTAL)
      std::cout << " elimination method: MULTIFRONTAL\n";
    else if(elimination == SEQUENTIAL)
      std::cout << " elimination method: SEQUENTIAL\n";
    else
      std::cout << " elimination method: (invalid)\n";

    if(factorization == CHOLESKY)
      std::cout << " factorization method: CHOLESKY\n";
    else if(factorization == LDL)
      std::cout << " factorization method: LDL\n";
    else if(factorization == QR)
      std::cout << " factorization method: QR\n";
    else
      std::cout << " factorization method: (invalid)\n";

    if(ordering)
      std::cout << " ordering: custom\n";
    else
      std::cout << " ordering: COLAMD\n";

    std::cout.flush();
  }

  GaussianFactorGraph::Eliminate getEliminationFunction() const {
    if(factorization == SuccessiveLinearizationParams::CHOLESKY)
      return EliminatePreferCholesky;
    else if(factorization == SuccessiveLinearizationParams::LDL)
      return EliminatePreferLDL;
    else if(factorization == SuccessiveLinearizationParams::QR)
      return EliminateQR;
    else
      throw std::runtime_error("Nonlinear optimization parameter \"factorization\" is invalid");
  }
};

} /* namespace gtsam */
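
// Editorial usage sketch (these options are normally set on a derived params
// class, such as the LevenbergMarquardtParams used in the tests below, which
// inherits them):
//
//   SuccessiveLinearizationParams params;
//   params.elimination = SuccessiveLinearizationParams::SEQUENTIAL;
//   params.factorization = SuccessiveLinearizationParams::QR;
//   params.ordering = myOrdering;   // hypothetical Ordering; default is COLAMD
//   params.print("solver options");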

@ -40,31 +40,6 @@ namespace gtsam {
    ValueCloneAllocator() {}
  };

#if 0
  /* ************************************************************************* */
  class ValueAutomaticCasting {
    Key key_;
    const Value& value_;

  public:
    ValueAutomaticCasting(Key key, const Value& value) : key_(key), value_(value) {}

    template<class ValueType>
    class ConvertibleToValue : public ValueType {
    };

    template<class ValueType>
    operator const ConvertibleToValue<ValueType>& () const {
      // Check the type and throw exception if incorrect
      if(typeid(ValueType) != typeid(value_))
        throw ValuesIncorrectType(key_, typeid(ValueType), typeid(value_));

      // We have already checked the type, so do a "blind" static_cast, not dynamic_cast
      return static_cast<const ConvertibleToValue<ValueType>&>(value_);
    }
  };
#endif

  /* ************************************************************************* */
  template<typename ValueType>
  const ValueType& Values::at(Key j) const {

@ -83,27 +58,6 @@ namespace gtsam {
    return static_cast<const ValueType&>(*item->second);
  }

#if 0
  /* ************************************************************************* */
  inline ValueAutomaticCasting Values::at(Key j) const {
    // Find the item
    KeyValueMap::const_iterator item = values_.find(j);

    // Throw exception if it does not exist
    if(item == values_.end())
      throw ValuesKeyDoesNotExist("retrieve", j);

    return ValueAutomaticCasting(item->first, *item->second);
  }
#endif

#if 0
  /* ************************************************************************* */
  inline ValueAutomaticCasting Values::operator[](Key j) const {
    return at(j);
  }
#endif

  /* ************************************************************************* */
  template<typename ValueType>
  boost::optional<const ValueType&> Values::exists(Key j) const {

@ -84,6 +84,9 @@ namespace gtsam {
    /// A shared_ptr to this class
    typedef boost::shared_ptr<Values> shared_ptr;

    /// A const shared_ptr to this class
    typedef boost::shared_ptr<const Values> const_shared_ptr;

    /// A key-value pair, which you get by dereferencing iterators
    struct KeyValuePair {
      const Key key; ///< The key

@ -299,22 +302,6 @@ namespace gtsam {
     */
    const Value& at(Key j) const;

#if 0
    /** Retrieve a variable by key \c j. This non-templated version returns a
     * special ValueAutomaticCasting object that may be assigned to the proper
     * value.
     * @param j Retrieve the value associated with this key
     * @return A ValueAutomaticCasting object that may be assigned to a Value
     * of the proper type.
     */
    ValueAutomaticCasting at(Key j) const;
#endif

#if 0
    /** operator[] syntax for at(Key j) */
    ValueAutomaticCasting operator[](Key j) const;
#endif

    /** Check if a value exists with key \c j. See exists<>(Key j)
     * and exists(const TypedKey& j) for versions that return the value if it
     * exists. */

@ -390,6 +377,9 @@ namespace gtsam {
    /** Replace all keys and variables */
    Values& operator=(const Values& rhs);

    /** Swap the contents of two Values without copying data */
    void swap(Values& other) { values_.swap(other.values_); }

    /** Remove all variables from the config */
    void clear() { values_.clear(); }

@ -17,8 +17,6 @@

#include <gtsam/slam/planarSLAM.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/NonlinearOptimization.h>

// Use planarSLAM namespace for specific SLAM instance
namespace planarSLAM {

@ -24,7 +24,7 @@
#include <gtsam/slam/BearingRangeFactor.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimization.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/geometry/Pose2.h>

@ -110,15 +110,10 @@ namespace planarSLAM {

    /// Optimize
    Values optimize(const Values& initialEstimate) {
      typedef NonlinearOptimizer<Graph> Optimizer;
      return *Optimizer::optimizeLM(*this, initialEstimate,
          NonlinearOptimizationParameters::LAMBDA);
      return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize();
    }
  };

  /// Optimizer
  typedef NonlinearOptimizer<Graph> Optimizer;

} // planarSLAM
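
// Editorial sketch: after this change, optimization from the planarSLAM domain
// is a one-liner (assuming a planarSLAM::Graph `graph` and a Values
// `initialEstimate` as in the examples):
//
//   Values result = graph.optimize(initialEstimate);
//   // equivalent to: LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();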

@ -21,8 +21,6 @@

// Use pose2SLAM namespace for specific SLAM instance

template class gtsam::NonlinearOptimizer<pose2SLAM::Graph, gtsam::GaussianFactorGraph, gtsam::GaussianSequentialSolver>;

namespace pose2SLAM {

  /* ************************************************************************* */

@ -24,7 +24,7 @@
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianMultifrontalSolver.h>

@ -97,20 +97,10 @@ namespace pose2SLAM {

    /// Optimize
    Values optimize(const Values& initialEstimate) {
      typedef NonlinearOptimizer<Graph> Optimizer;
      return *Optimizer::optimizeLM(*this, initialEstimate,
          NonlinearOptimizationParameters::LAMBDA);
      return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize();
    }
  };

  /// The sequential optimizer
  typedef NonlinearOptimizer<Graph, GaussianFactorGraph,
      GaussianSequentialSolver> OptimizerSequential;

  /// The multifrontal optimizer
  typedef NonlinearOptimizer<Graph, GaussianFactorGraph,
      GaussianMultifrontalSolver> Optimizer;

} // pose2SLAM

@ -61,9 +61,6 @@ namespace gtsam {
  void addHardConstraint(Index i, const Pose3& p);
};

/// Optimizer
typedef NonlinearOptimizer<Graph> Optimizer;

} // pose3SLAM

/**

@ -16,10 +16,12 @@ using namespace boost;
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/RangeFactor.h>
#include <gtsam/slam/PriorFactor.h>

using namespace std;
using namespace gtsam;

@ -62,8 +64,6 @@ double getGaussian()
  return sqrt(-2.0f * (double)log(S) / S) * V1;
}

typedef NonlinearOptimizer<Graph> Optimizer;

const SharedNoiseModel sigma1(noiseModel::Unit::Create(1));

/* ************************************************************************* */

@ -148,11 +148,11 @@ TEST( GeneralSFMFactor, optimize_defaultK ) {
  vector<GeneralCamera> X = genCameraDefaultCalibration();

  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

@ -160,26 +160,24 @@ TEST( GeneralSFMFactor, optimize_defaultK ) {

  // add initial
  const double noise = baseline*0.1;
  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    Point3 pt(L[i].x()+noise*getGaussian(),
        L[i].y()+noise*getGaussian(),
        L[i].z()+noise*getGaussian());
    values->insert(Symbol('l',i), pt) ;
    values.insert(Symbol('l',i), pt) ;
  }

  graph->addCameraConstraint(0, X[0]);
  graph.addCameraConstraint(0, X[0]);

  // Create an ordering of the variables
  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);
  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * 1e-5 * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * 1e-5 * nMeasurements);
}

/* ************************************************************************* */

@ -187,11 +185,11 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) {
  vector<Point3> L = genPoint3();
  vector<GeneralCamera> X = genCameraVariableCalibration();
  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

@ -199,9 +197,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) {

  // add initial
  const double noise = baseline*0.1;
  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  // add noise only to the first landmark
  for ( size_t i = 0 ; i < L.size() ; ++i ) {

@ -209,22 +207,20 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) {
      Point3 pt(L[i].x()+noise*getGaussian(),
          L[i].y()+noise*getGaussian(),
          L[i].z()+noise*getGaussian());
      values->insert(Symbol('l',i), pt) ;
      values.insert(Symbol('l',i), pt) ;
    }
    else {
      values->insert(Symbol('l',i), L[i]) ;
      values.insert(Symbol('l',i), L[i]) ;
    }
  }

  graph->addCameraConstraint(0, X[0]);
  graph.addCameraConstraint(0, X[0]);
  const double reproj_error = 1e-5;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);
  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */

@ -235,40 +231,37 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) {

  // add measurement with noise
  const double noise = baseline*0.1;
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

  const size_t nMeasurements = L.size()*X.size();

  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    Point3 pt(L[i].x()+noise*getGaussian(),
        L[i].y()+noise*getGaussian(),
        L[i].z()+noise*getGaussian());
    //Point3 pt(L[i].x(), L[i].y(), L[i].z());
    values->insert(Symbol('l',i), pt) ;
    values.insert(Symbol('l',i), pt) ;
  }

  for ( size_t i = 0 ; i < X.size() ; ++i )
    graph->addCameraConstraint(i, X[i]);
    graph.addCameraConstraint(i, X[i]);

  const double reproj_error = 1e-5 ;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);

  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */

@ -278,17 +271,17 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
  vector<GeneralCamera> X = genCameraVariableCalibration();

  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

  const size_t nMeasurements = L.size()*X.size();

  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i ) {
    const double
      rot_noise = 1e-5,

@ -296,7 +289,7 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
      focal_noise = 1,
      skew_noise = 1e-5;
    if ( i == 0 ) {
      values->insert(Symbol('x',i), X[i]) ;
      values.insert(Symbol('x',i), X[i]) ;
    }
    else {

@ -307,29 +300,25 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
          skew_noise, // s
          trans_noise, trans_noise // ux, uy
          ) ;
      values->insert(Symbol('x',i), X[i].retract(delta)) ;
      values.insert(Symbol('x',i), X[i].retract(delta)) ;
    }
  }

  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    values->insert(Symbol('l',i), L[i]) ;
    values.insert(Symbol('l',i), L[i]) ;
  }

  // fix X0 and all landmarks, allow only the X[1] to move
  graph->addCameraConstraint(0, X[0]);
  graph.addCameraConstraint(0, X[0]);
  for ( size_t i = 0 ; i < L.size() ; ++i )
    graph->addPoint3Constraint(i, L[i]);
    graph.addPoint3Constraint(i, L[i]);

  const double reproj_error = 1e-5 ;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);

  Optimizer optimizer2 = optimizer.levenbergMarquardt();

  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */

@ -338,11 +327,11 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) {
  vector<GeneralCamera> X = genCameraVariableCalibration();

  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

@ -350,28 +339,79 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) {

  // add initial
  const double noise = baseline*0.1;
  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  // add noise only to the first landmark
  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    Point3 pt(L[i].x()+noise*getGaussian(),
        L[i].y()+noise*getGaussian(),
        L[i].z()+noise*getGaussian());
    values->insert(Symbol('l',i), pt) ;
    values.insert(Symbol('l',i), pt) ;
  }

  graph->addCameraConstraint(0, X[0]);
  // Constrain position of system with the first camera constrained to the origin
  graph.addCameraConstraint(0, X[0]);

  // Constrain the scale of the problem with a soft range factor of 2m between the cameras
  graph.add(RangeFactor<GeneralCamera,GeneralCamera>(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 10.0)));

  const double reproj_error = 1e-5 ;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-2, 1e-2, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);

  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */
TEST(GeneralSFMFactor, GeneralCameraPoseRange) {
  // Tests range factor between a GeneralCamera and a Pose3
  Graph graph;
  graph.addCameraConstraint(0, GeneralCamera());
  graph.add(RangeFactor<GeneralCamera, Pose3>(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 1.0)));
  graph.add(PriorFactor<Pose3>(Symbol('x',1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)), sharedSigma(6, 1.0)));

  Values init;
  init.insert(Symbol('x',0), GeneralCamera());
  init.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.0,1.0,1.0)));

  // The optimal value between the 2m range factor and 1m prior is 1.5m
  Values expected;
  expected.insert(Symbol('x',0), GeneralCamera());
  expected.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.5,0.0,0.0)));

  LevenbergMarquardtParams params;
  params.absoluteErrorTol = 1e-9;
  params.relativeErrorTol = 1e-9;
  Values actual = LevenbergMarquardtOptimizer(graph, init, params).optimize();

  EXPECT(assert_equal(expected, actual, 1e-4));
}
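
// Editorial check of the 1.5m claim above: with x0 fixed at the origin and
// equal unit sigmas, the error is (x-1)^2 from the prior plus (x-2)^2 from the
// range factor, and d/dx [(x-1)^2 + (x-2)^2] = 0 gives x = 1.5.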

/* ************************************************************************* */
TEST(GeneralSFMFactor, CalibratedCameraPoseRange) {
  // Tests range factor between a CalibratedCamera and a Pose3
  NonlinearFactorGraph graph;
  graph.add(PriorFactor<CalibratedCamera>(Symbol('x',0), CalibratedCamera(), sharedSigma(6, 1.0)));
  graph.add(RangeFactor<CalibratedCamera, Pose3>(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 1.0)));
  graph.add(PriorFactor<Pose3>(Symbol('x',1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)), sharedSigma(6, 1.0)));

  Values init;
  init.insert(Symbol('x',0), CalibratedCamera());
  init.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.0,1.0,1.0)));

  Values expected;
  expected.insert(Symbol('x',0), CalibratedCamera(Pose3(Rot3(), Point3(-0.333333333333, 0, 0))));
  expected.insert(Symbol('x',1), Pose3(Rot3(), Point3(1.333333333333, 0, 0)));

  LevenbergMarquardtParams params;
  params.absoluteErrorTol = 1e-9;
  params.relativeErrorTol = 1e-9;
  Values actual = LevenbergMarquardtOptimizer(graph, init, params).optimize();

  EXPECT(assert_equal(expected, actual, 1e-4));
}
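
// Editorial check of the expected values above: with unit-sigma priors on a
// (camera, prior at 0) and b (pose, prior at 1) plus a 2m range factor,
// minimizing a^2 + (b-1)^2 + (b-a-2)^2 gives 2a - b + 2 = 0 and 2b - a - 3 = 0,
// hence a = -1/3 and b = 4/3, matching -0.3333 and 1.3333.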

/* ************************************************************************* */

@ -16,10 +16,11 @@ using namespace boost;
#include <gtsam/geometry/Cal3Bundler.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/RangeFactor.h>

using namespace std;
using namespace gtsam;

@ -63,8 +64,6 @@ double getGaussian()
  return sqrt(-2.0f * (double)log(S) / S) * V1;
}

typedef NonlinearOptimizer<Graph> Optimizer;

const SharedNoiseModel sigma1(noiseModel::Unit::Create(1));

/* ************************************************************************* */

@ -150,11 +149,11 @@ TEST( GeneralSFMFactor, optimize_defaultK ) {
  vector<GeneralCamera> X = genCameraDefaultCalibration();

  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

@ -162,26 +161,24 @@ TEST( GeneralSFMFactor, optimize_defaultK ) {

  // add initial
  const double noise = baseline*0.1;
  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    Point3 pt(L[i].x()+noise*getGaussian(),
        L[i].y()+noise*getGaussian(),
        L[i].z()+noise*getGaussian());
    values->insert(Symbol('l',i), pt) ;
    values.insert(Symbol('l',i), pt) ;
  }

  graph->addCameraConstraint(0, X[0]);
  graph.addCameraConstraint(0, X[0]);

  // Create an ordering of the variables
  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);
  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * 1e-5 * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * 1e-5 * nMeasurements);
}

/* ************************************************************************* */

@ -189,11 +186,11 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) {
  vector<Point3> L = genPoint3();
  vector<GeneralCamera> X = genCameraVariableCalibration();
  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

@ -201,9 +198,9 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) {

  // add initial
  const double noise = baseline*0.1;
  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  // add noise only to the first landmark
  for ( size_t i = 0 ; i < L.size() ; ++i ) {

@ -211,22 +208,20 @@ TEST( GeneralSFMFactor, optimize_varK_SingleMeasurementError ) {
      Point3 pt(L[i].x()+noise*getGaussian(),
          L[i].y()+noise*getGaussian(),
          L[i].z()+noise*getGaussian());
      values->insert(Symbol('l',i), pt) ;
      values.insert(Symbol('l',i), pt) ;
    }
    else {
      values->insert(Symbol('l',i), L[i]) ;
      values.insert(Symbol('l',i), L[i]) ;
    }
  }

  graph->addCameraConstraint(0, X[0]);
  graph.addCameraConstraint(0, X[0]);
  const double reproj_error = 1e-5;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);
  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */

@ -237,40 +232,37 @@ TEST( GeneralSFMFactor, optimize_varK_FixCameras ) {

  // add measurement with noise
  const double noise = baseline*0.1;
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

  const size_t nMeasurements = L.size()*X.size();

  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    Point3 pt(L[i].x()+noise*getGaussian(),
        L[i].y()+noise*getGaussian(),
        L[i].z()+noise*getGaussian());
    //Point3 pt(L[i].x(), L[i].y(), L[i].z());
    values->insert(Symbol('l',i), pt) ;
    values.insert(Symbol('l',i), pt) ;
  }

  for ( size_t i = 0 ; i < X.size() ; ++i )
    graph->addCameraConstraint(i, X[i]);
    graph.addCameraConstraint(i, X[i]);

  const double reproj_error = 1e-5 ;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);

  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */

@ -280,23 +272,23 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
  vector<GeneralCamera> X = genCameraVariableCalibration();

  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

  const size_t nMeasurements = L.size()*X.size();

  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i ) {
    const double
      rot_noise = 1e-5, trans_noise = 1e-3,
      focal_noise = 1, distort_noise = 1e-3;
    if ( i == 0 ) {
      values->insert(Symbol('x',i), X[i]) ;
      values.insert(Symbol('x',i), X[i]) ;
    }
    else {

@ -305,28 +297,25 @@ TEST( GeneralSFMFactor, optimize_varK_FixLandmarks ) {
          trans_noise, trans_noise, trans_noise, // translation
          focal_noise, distort_noise, distort_noise // f, k1, k2
          ) ;
      values->insert(Symbol('x',i), X[i].retract(delta)) ;
      values.insert(Symbol('x',i), X[i].retract(delta)) ;
    }
  }

  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    values->insert(Symbol('l',i), L[i]) ;
    values.insert(Symbol('l',i), L[i]) ;
  }

  // fix X0 and all landmarks, allow only the X[1] to move
  graph->addCameraConstraint(0, X[0]);
  graph.addCameraConstraint(0, X[0]);
  for ( size_t i = 0 ; i < L.size() ; ++i )
    graph->addPoint3Constraint(i, L[i]);
    graph.addPoint3Constraint(i, L[i]);

  const double reproj_error = 1e-5 ;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-3, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);

  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */

@ -335,11 +324,11 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) {
  vector<GeneralCamera> X = genCameraVariableCalibration();

  // add measurement with noise
  shared_ptr<Graph> graph(new Graph());
  Graph graph;
  for ( size_t j = 0 ; j < X.size() ; ++j) {
    for ( size_t i = 0 ; i < L.size() ; ++i) {
      Point2 pt = X[j].project(L[i]) ;
      graph->addMeasurement(j, i, pt, sigma1);
      graph.addMeasurement(j, i, pt, sigma1);
    }
  }

@ -347,28 +336,30 @@ TEST( GeneralSFMFactor, optimize_varK_BA ) {

  // add initial
  const double noise = baseline*0.1;
  boost::shared_ptr<Values> values(new Values);
  Values values;
  for ( size_t i = 0 ; i < X.size() ; ++i )
    values->insert(Symbol('x',i), X[i]) ;
    values.insert(Symbol('x',i), X[i]) ;

  // add noise only to the first landmark
  for ( size_t i = 0 ; i < L.size() ; ++i ) {
    Point3 pt(L[i].x()+noise*getGaussian(),
        L[i].y()+noise*getGaussian(),
        L[i].z()+noise*getGaussian());
    values->insert(Symbol('l',i), pt) ;
    values.insert(Symbol('l',i), pt) ;
  }

  graph->addCameraConstraint(0, X[0]);
  // Constrain position of system with the first camera constrained to the origin
  graph.addCameraConstraint(0, X[0]);

  // Constrain the scale of the problem with a soft range factor of 2m between the cameras
  graph.add(RangeFactor<GeneralCamera,GeneralCamera>(Symbol('x',0), Symbol('x',1), 2.0, sharedSigma(1, 10.0)));

  const double reproj_error = 1e-5 ;

  shared_ptr<Ordering> ordering = getOrdering(X,L);
  NonlinearOptimizationParameters::shared_ptr params (
      new NonlinearOptimizationParameters(1e-5, 1e-5, 0.0, 100, 1e-5, 10, NonlinearOptimizationParameters::SILENT));
  Optimizer optimizer(graph, values, ordering, params);

  Optimizer optimizer2 = optimizer.levenbergMarquardt();
  EXPECT(optimizer2.error() < 0.5 * reproj_error * nMeasurements);
  Ordering ordering = *getOrdering(X,L);
  LevenbergMarquardtOptimizer optimizer(graph, values, ordering);
  Values final = optimizer.optimize();
  EXPECT(optimizer.error() < 0.5 * reproj_error * nMeasurements);
}

/* ************************************************************************* */
|
||||
|
|
|
@ -148,29 +148,29 @@ TEST( Pose2SLAM, linearization )
|
|||
TEST(Pose2Graph, optimize) {
|
||||
|
||||
// create a Pose graph with one equality constraint and one measurement
|
||||
shared_ptr<pose2SLAM::Graph> fg(new pose2SLAM::Graph);
|
||||
fg->addPoseConstraint(0, Pose2(0,0,0));
|
||||
fg->addOdometry(0, 1, Pose2(1,2,M_PI_2), covariance);
|
||||
pose2SLAM::Graph fg;
|
||||
fg.addPoseConstraint(0, Pose2(0,0,0));
|
||||
fg.addOdometry(0, 1, Pose2(1,2,M_PI_2), covariance);
|
||||
|
||||
// Create initial config
|
||||
boost::shared_ptr<Values> initial(new Values());
|
||||
initial->insert(pose2SLAM::PoseKey(0), Pose2(0,0,0));
|
||||
initial->insert(pose2SLAM::PoseKey(1), Pose2(0,0,0));
|
||||
Values initial;
|
||||
initial.insert(pose2SLAM::PoseKey(0), Pose2(0,0,0));
|
||||
initial.insert(pose2SLAM::PoseKey(1), Pose2(0,0,0));
|
||||
|
||||
// Choose an ordering and optimize
|
||||
shared_ptr<Ordering> ordering(new Ordering);
|
||||
*ordering += kx0, kx1;
|
||||
typedef NonlinearOptimizer<pose2SLAM::Graph> Optimizer;
|
||||
Ordering ordering;
|
||||
ordering += kx0, kx1;
|
||||
|
||||
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15);
|
||||
Optimizer optimizer0(fg, initial, ordering, params);
|
||||
Optimizer optimizer = optimizer0.levenbergMarquardt();
|
||||
LevenbergMarquardtParams params;
|
||||
params.relativeErrorTol = 1e-15;
|
||||
params.ordering = ordering;
|
||||
Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize();
|
||||
|
||||
// Check with expected config
|
||||
Values expected;
|
||||
expected.insert(pose2SLAM::PoseKey(0), Pose2(0,0,0));
|
||||
expected.insert(pose2SLAM::PoseKey(1), Pose2(1,2,M_PI_2));
|
||||
CHECK(assert_equal(expected, *optimizer.values()));
|
||||
CHECK(assert_equal(expected, actual));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -182,29 +182,28 @@ TEST(Pose2Graph, optimizeThreePoses) {
|
|||
Pose2 p0 = hexagon.pose(0), p1 = hexagon.pose(1);
|
||||
|
||||
// create a Pose graph with one equality constraint and one measurement
|
||||
shared_ptr<pose2SLAM::Graph> fg(new pose2SLAM::Graph);
|
||||
fg->addPoseConstraint(0, p0);
|
||||
pose2SLAM::Graph fg;
|
||||
fg.addPoseConstraint(0, p0);
|
||||
Pose2 delta = p0.between(p1);
|
||||
fg->addOdometry(0, 1, delta, covariance);
|
||||
fg->addOdometry(1, 2, delta, covariance);
|
||||
fg->addOdometry(2, 0, delta, covariance);
|
||||
fg.addOdometry(0, 1, delta, covariance);
|
||||
fg.addOdometry(1, 2, delta, covariance);
|
||||
fg.addOdometry(2, 0, delta, covariance);
|
||||
|
||||
// Create initial config
|
||||
boost::shared_ptr<pose2SLAM::Values> initial(new pose2SLAM::Values());
|
||||
initial->insertPose(0, p0);
|
||||
initial->insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1)));
|
||||
initial->insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1)));
|
||||
pose2SLAM::Values initial;
|
||||
initial.insertPose(0, p0);
|
||||
initial.insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1)));
|
||||
initial.insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1)));
|
||||
|
||||
// Choose an ordering
|
||||
shared_ptr<Ordering> ordering(new Ordering);
|
||||
*ordering += kx0,kx1,kx2;
|
||||
Ordering ordering;
|
||||
ordering += kx0,kx1,kx2;
|
||||
|
||||
// optimize
|
||||
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15);
|
||||
pose2SLAM::Optimizer optimizer0(fg, initial, ordering, params);
|
||||
pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt();
|
||||
|
||||
Values actual = *optimizer.values();
|
||||
LevenbergMarquardtParams params;
|
||||
params.relativeErrorTol = 1e-15;
|
||||
params.ordering = ordering;
|
||||
Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize();
|
||||
|
||||
// Check with ground truth
|
||||
CHECK(assert_equal((const Values&)hexagon, actual));
|
||||
|
@ -219,35 +218,34 @@ TEST_UNSAFE(Pose2SLAM, optimizeCircle) {
Pose2 p0 = hexagon.pose(0), p1 = hexagon.pose(1);

// create a Pose graph with one equality constraint and one measurement
shared_ptr<pose2SLAM::Graph> fg(new pose2SLAM::Graph);
fg->addPoseConstraint(0, p0);
pose2SLAM::Graph fg;
fg.addPoseConstraint(0, p0);
Pose2 delta = p0.between(p1);
fg->addOdometry(0, 1, delta, covariance);
fg->addOdometry(1,2, delta, covariance);
fg->addOdometry(2,3, delta, covariance);
fg->addOdometry(3,4, delta, covariance);
fg->addOdometry(4,5, delta, covariance);
fg->addOdometry(5, 0, delta, covariance);
fg.addOdometry(0, 1, delta, covariance);
fg.addOdometry(1,2, delta, covariance);
fg.addOdometry(2,3, delta, covariance);
fg.addOdometry(3,4, delta, covariance);
fg.addOdometry(4,5, delta, covariance);
fg.addOdometry(5, 0, delta, covariance);

// Create initial config
boost::shared_ptr<pose2SLAM::Values> initial(new pose2SLAM::Values());
initial->insertPose(0, p0);
initial->insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1)));
initial->insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1)));
initial->insertPose(3, hexagon.pose(3).retract(Vector_(3,-0.1, 0.1,-0.1)));
initial->insertPose(4, hexagon.pose(4).retract(Vector_(3, 0.1,-0.1, 0.1)));
initial->insertPose(5, hexagon.pose(5).retract(Vector_(3,-0.1, 0.1,-0.1)));
pose2SLAM::Values initial;
initial.insertPose(0, p0);
initial.insertPose(1, hexagon.pose(1).retract(Vector_(3,-0.1, 0.1,-0.1)));
initial.insertPose(2, hexagon.pose(2).retract(Vector_(3, 0.1,-0.1, 0.1)));
initial.insertPose(3, hexagon.pose(3).retract(Vector_(3,-0.1, 0.1,-0.1)));
initial.insertPose(4, hexagon.pose(4).retract(Vector_(3, 0.1,-0.1, 0.1)));
initial.insertPose(5, hexagon.pose(5).retract(Vector_(3,-0.1, 0.1,-0.1)));

// Choose an ordering
shared_ptr<Ordering> ordering(new Ordering);
*ordering += kx0,kx1,kx2,kx3,kx4,kx5;
Ordering ordering;
ordering += kx0,kx1,kx2,kx3,kx4,kx5;

// optimize
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15);
pose2SLAM::Optimizer optimizer0(fg, initial, ordering, params);
pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt();
Values actual = *optimizer.values();
LevenbergMarquardtParams params;
params.relativeErrorTol = 1e-15;
params.ordering = ordering;
Values actual = LevenbergMarquardtOptimizer(fg, initial, params).optimize();

// Check with ground truth
CHECK(assert_equal((const Values&)hexagon, actual));
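Across these hunks the shared NonlinearOptimizationParameters object is retired in favour of per-algorithm params structs. A minimal sketch of the new-style call, assuming the pose2SLAM graph fg, initial values, and ordering built in the test above (variable names illustrative):

// Sketch: convergence thresholds and the elimination ordering now live on
// LevenbergMarquardtParams instead of a shared-pointer parameters object
// threaded through a templated NonlinearOptimizer.
LevenbergMarquardtParams params;
params.relativeErrorTol = 1e-15;  // replaces newDecreaseThresholds(1e-15, 1e-15)
params.absoluteErrorTol = 1e-15;
params.ordering = ordering;       // optional; COLAMD is used when omitted
Values result = LevenbergMarquardtOptimizer(fg, initial, params).optimize();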
@ -30,6 +30,7 @@ using namespace boost::assign;
#include <gtsam/slam/pose3SLAM.h>
#include <gtsam/slam/PartialPriorFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

using namespace std;
using namespace gtsam;
@ -52,35 +53,32 @@ TEST(Pose3Graph, optimizeCircle) {
Pose3 gT0 = hexagon.at<Pose3>(PoseKey(0)), gT1 = hexagon.at<Pose3>(PoseKey(1));

// create a Pose graph with one equality constraint and one measurement
shared_ptr<Pose3Graph> fg(new Pose3Graph);
fg->addHardConstraint(0, gT0);
Pose3Graph fg;
fg.addHardConstraint(0, gT0);
Pose3 _0T1 = gT0.between(gT1); // inv(gT0)*gT1
double theta = M_PI/3.0;
CHECK(assert_equal(Pose3(Rot3::yaw(-theta),Point3(radius*sin(theta),-radius*cos(theta),0)),_0T1));
fg->addConstraint(0,1, _0T1, covariance);
fg->addConstraint(1,2, _0T1, covariance);
fg->addConstraint(2,3, _0T1, covariance);
fg->addConstraint(3,4, _0T1, covariance);
fg->addConstraint(4,5, _0T1, covariance);
fg->addConstraint(5,0, _0T1, covariance);
fg.addConstraint(0,1, _0T1, covariance);
fg.addConstraint(1,2, _0T1, covariance);
fg.addConstraint(2,3, _0T1, covariance);
fg.addConstraint(3,4, _0T1, covariance);
fg.addConstraint(4,5, _0T1, covariance);
fg.addConstraint(5,0, _0T1, covariance);

// Create initial config
boost::shared_ptr<Values> initial(new Values());
initial->insert(PoseKey(0), gT0);
initial->insert(PoseKey(1), hexagon.at<Pose3>(PoseKey(1)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
initial->insert(PoseKey(2), hexagon.at<Pose3>(PoseKey(2)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
initial->insert(PoseKey(3), hexagon.at<Pose3>(PoseKey(3)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
initial->insert(PoseKey(4), hexagon.at<Pose3>(PoseKey(4)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
initial->insert(PoseKey(5), hexagon.at<Pose3>(PoseKey(5)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
Values initial;
initial.insert(PoseKey(0), gT0);
initial.insert(PoseKey(1), hexagon.at<Pose3>(PoseKey(1)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
initial.insert(PoseKey(2), hexagon.at<Pose3>(PoseKey(2)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
initial.insert(PoseKey(3), hexagon.at<Pose3>(PoseKey(3)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));
initial.insert(PoseKey(4), hexagon.at<Pose3>(PoseKey(4)).retract(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1)));
initial.insert(PoseKey(5), hexagon.at<Pose3>(PoseKey(5)).retract(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1)));

// Choose an ordering and optimize
shared_ptr<Ordering> ordering(new Ordering);
*ordering += kx0,kx1,kx2,kx3,kx4,kx5;
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-15, 1e-15);
pose3SLAM::Optimizer optimizer0(fg, initial, ordering, params);
pose3SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt();
Ordering ordering;
ordering += kx0,kx1,kx2,kx3,kx4,kx5;

Values actual = *optimizer.values();
Values actual = LevenbergMarquardtOptimizer(fg, initial, ordering).optimize();

// Check with ground truth
CHECK(assert_equal(hexagon, actual,1e-4));
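The perturbations above rely on retract, which maps a tangent-space vector to a nearby pose on the manifold. A minimal sketch, assuming GTSAM's Pose3 (variable names illustrative):

// Sketch: retract applies a 6-dof tangent-space perturbation to a Pose3;
// localCoordinates is its (approximate) inverse.
Pose3 pose;                                           // identity
Vector d = Vector_(6, -0.1, 0.1, -0.1, -0.1, 0.1, -0.1);
Pose3 perturbed = pose.retract(d);                    // nearby pose on the manifold
Vector recovered = pose.localCoordinates(perturbed);  // approximately d again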
@ -115,7 +113,7 @@ TEST(Pose3Graph, partial_prior_height) {
// linearization
EXPECT_DOUBLES_EQUAL(2.0, height.error(values), tol);

Values actual = *pose3SLAM::Optimizer::optimizeLM(graph, values);
Values actual = LevenbergMarquardtOptimizer(graph, values).optimize();
EXPECT(assert_equal(expected, actual.at<Pose3>(key), tol));
EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol);
}
@ -167,7 +165,7 @@ TEST(Pose3Graph, partial_prior_xy) {
Values values;
values.insert(key, init);

Values actual = *pose3SLAM::Optimizer::optimizeLM(graph, values);
Values actual = LevenbergMarquardtOptimizer(graph, values).optimize();
EXPECT(assert_equal(expected, actual.at<Pose3>(key), tol));
EXPECT_DOUBLES_EQUAL(0.0, graph.error(actual), tol);
}
@ -21,7 +21,7 @@
#include <gtsam/geometry/StereoCamera.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/slam/StereoFactor.h>

#include <gtsam/slam/visualSLAM.h>
@ -50,45 +50,34 @@ TEST( StereoFactor, singlePoint)
{
//Cal3_S2 K(625, 625, 0, 320, 240, 0.5);
boost::shared_ptr<Cal3_S2Stereo> K(new Cal3_S2Stereo(625, 625, 0, 320, 240, 0.5));
boost::shared_ptr<visualSLAM::Graph> graph(new visualSLAM::Graph());
NonlinearFactorGraph graph;

graph->add(visualSLAM::PoseConstraint(PoseKey(1),camera1));
graph.add(visualSLAM::PoseConstraint(PoseKey(1),camera1));

StereoPoint2 z14(320,320.0-50, 240);
// arguments: measurement, sigma, cam#, measurement #, K, baseline (m)
graph->add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K));
graph.add(visualSLAM::StereoFactor(z14,sigma, PoseKey(1), PointKey(1), K));

// Create a configuration corresponding to the ground truth
boost::shared_ptr<Values> values(new Values());
values->insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin
Values values;
values.insert(PoseKey(1), camera1); // add camera at z=6.25m looking towards origin

Point3 l1(0, 0, 0);
values->insert(PointKey(1), l1); // add point at origin;
values.insert(PointKey(1), l1); // add point at origin;

Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);

typedef gtsam::NonlinearOptimizer<visualSLAM::Graph, gtsam::GaussianFactorGraph, gtsam::GaussianMultifrontalSolver> Optimizer; // optimization engine for this domain

double absoluteThreshold = 1e-9;
double relativeThreshold = 1e-5;
int maxIterations = 100;
NonlinearOptimizationParameters::verbosityLevel verbose = NonlinearOptimizationParameters::SILENT;
NonlinearOptimizationParameters parameters(absoluteThreshold, relativeThreshold, 0,
maxIterations, 1.0, 10, verbose, NonlinearOptimizationParameters::BOUNDED);

Optimizer optimizer(graph, values, ordering, make_shared<NonlinearOptimizationParameters>(parameters));
GaussNewtonOptimizer optimizer(graph, values);

// We expect the initial to be zero because config is the ground truth
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// Iterate once, and the config should not have changed
Optimizer afterOneIteration = optimizer.iterate();
DOUBLES_EQUAL(0.0, afterOneIteration.error(), 1e-9);
optimizer.iterate();
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// Complete solution
Optimizer final = optimizer.gaussNewton();
optimizer.optimize();

DOUBLES_EQUAL(0.0, final.error(), 1e-6);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-6);
}

/* ************************************************************************* */
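Note the change of style this hunk illustrates: the old NonlinearOptimizer returned a fresh optimizer object from every iterate()/gaussNewton() call, while the new classes advance their own state. A minimal sketch, assuming a graph and ground-truth values as in the stereo test above (variable names illustrative):

// Sketch: the new optimizers are stateful objects. iterate() performs one
// step in place, optimize() runs to convergence, and error()/values() query
// the optimizer's current state rather than a returned copy.
GaussNewtonOptimizer optimizer(graph, values);
double initialError = optimizer.error();   // error at the initial values
optimizer.iterate();                       // one Gauss-Newton step, applied in place
optimizer.optimize();                      // iterate until the convergence criteria hold
Values solution = optimizer.values();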
@ -23,7 +23,7 @@
using namespace boost;

#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/visualSLAM.h>

using namespace std;
@ -81,37 +81,37 @@ visualSLAM::Graph testGraph() {
TEST( Graph, optimizeLM)
{
// build a graph
shared_ptr<visualSLAM::Graph> graph(new visualSLAM::Graph(testGraph()));
visualSLAM::Graph graph(testGraph());
// add 3 landmark constraints
graph->addPointConstraint(1, landmark1);
graph->addPointConstraint(2, landmark2);
graph->addPointConstraint(3, landmark3);
graph.addPointConstraint(1, landmark1);
graph.addPointConstraint(2, landmark2);
graph.addPointConstraint(3, landmark3);

// Create an initial values structure corresponding to the ground truth
boost::shared_ptr<Values> initialEstimate(new Values);
initialEstimate->insert(PoseKey(1), camera1);
initialEstimate->insert(PoseKey(2), camera2);
initialEstimate->insert(PointKey(1), landmark1);
initialEstimate->insert(PointKey(2), landmark2);
initialEstimate->insert(PointKey(3), landmark3);
initialEstimate->insert(PointKey(4), landmark4);
Values initialEstimate;
initialEstimate.insert(PoseKey(1), camera1);
initialEstimate.insert(PoseKey(2), camera2);
initialEstimate.insert(PointKey(1), landmark1);
initialEstimate.insert(PointKey(2), landmark2);
initialEstimate.insert(PointKey(3), landmark3);
initialEstimate.insert(PointKey(4), landmark4);

// Create an ordering of the variables
shared_ptr<Ordering> ordering(new Ordering);
*ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2);
Ordering ordering;
ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2);

// Create an optimizer and check its error
// We expect the initial to be zero because config is the ground truth
visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering);
LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, ordering);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// Iterate once, and the config should not have changed because we started
// with the ground truth
visualSLAM::Optimizer afterOneIteration = optimizer.iterate();
optimizer.iterate();
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// check if correct
CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values())));
CHECK(assert_equal(initialEstimate, optimizer.values()));
}
@ -119,36 +119,36 @@ TEST( Graph, optimizeLM)
TEST( Graph, optimizeLM2)
{
// build a graph
shared_ptr<visualSLAM::Graph> graph(new visualSLAM::Graph(testGraph()));
visualSLAM::Graph graph(testGraph());
// add 2 camera constraints
graph->addPoseConstraint(1, camera1);
graph->addPoseConstraint(2, camera2);
graph.addPoseConstraint(1, camera1);
graph.addPoseConstraint(2, camera2);

// Create an initial values structure corresponding to the ground truth
boost::shared_ptr<Values> initialEstimate(new Values);
initialEstimate->insert(PoseKey(1), camera1);
initialEstimate->insert(PoseKey(2), camera2);
initialEstimate->insert(PointKey(1), landmark1);
initialEstimate->insert(PointKey(2), landmark2);
initialEstimate->insert(PointKey(3), landmark3);
initialEstimate->insert(PointKey(4), landmark4);
Values initialEstimate;
initialEstimate.insert(PoseKey(1), camera1);
initialEstimate.insert(PoseKey(2), camera2);
initialEstimate.insert(PointKey(1), landmark1);
initialEstimate.insert(PointKey(2), landmark2);
initialEstimate.insert(PointKey(3), landmark3);
initialEstimate.insert(PointKey(4), landmark4);

// Create an ordering of the variables
shared_ptr<Ordering> ordering(new Ordering);
*ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2);
Ordering ordering;
ordering += PointKey(1),PointKey(2),PointKey(3),PointKey(4),PoseKey(1),PoseKey(2);

// Create an optimizer and check its error
// We expect the initial to be zero because config is the ground truth
visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering);
LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, ordering);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// Iterate once, and the config should not have changed because we started
// with the ground truth
visualSLAM::Optimizer afterOneIteration = optimizer.iterate();
optimizer.iterate();
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// check if correct
CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values())));
CHECK(assert_equal(initialEstimate, optimizer.values()));
}
@ -156,34 +156,32 @@ TEST( Graph, optimizeLM2)
TEST( Graph, CHECK_ORDERING)
{
// build a graph
shared_ptr<visualSLAM::Graph> graph(new visualSLAM::Graph(testGraph()));
visualSLAM::Graph graph = testGraph();
// add 2 camera constraints
graph->addPoseConstraint(1, camera1);
graph->addPoseConstraint(2, camera2);
graph.addPoseConstraint(1, camera1);
graph.addPoseConstraint(2, camera2);

// Create an initial values structure corresponding to the ground truth
boost::shared_ptr<Values> initialEstimate(new Values);
initialEstimate->insert(PoseKey(1), camera1);
initialEstimate->insert(PoseKey(2), camera2);
initialEstimate->insert(PointKey(1), landmark1);
initialEstimate->insert(PointKey(2), landmark2);
initialEstimate->insert(PointKey(3), landmark3);
initialEstimate->insert(PointKey(4), landmark4);

Ordering::shared_ptr ordering = graph->orderingCOLAMD(*initialEstimate);
Values initialEstimate;
initialEstimate.insert(PoseKey(1), camera1);
initialEstimate.insert(PoseKey(2), camera2);
initialEstimate.insert(PointKey(1), landmark1);
initialEstimate.insert(PointKey(2), landmark2);
initialEstimate.insert(PointKey(3), landmark3);
initialEstimate.insert(PointKey(4), landmark4);

// Create an optimizer and check its error
// We expect the initial to be zero because config is the ground truth
visualSLAM::Optimizer optimizer(graph, initialEstimate, ordering);
LevenbergMarquardtOptimizer optimizer(graph, initialEstimate);
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// Iterate once, and the config should not have changed because we started
// with the ground truth
visualSLAM::Optimizer afterOneIteration = optimizer.iterate();
optimizer.iterate();
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9);

// check if correct
CHECK(assert_equal(*initialEstimate,*(afterOneIteration.values())));
CHECK(assert_equal(initialEstimate, optimizer.values()));
}

/* ************************************************************************* */
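The CHECK_ORDERING hunk above shows the other recurring simplification in this commit: callers no longer compute a COLAMD ordering themselves. A minimal sketch of both options, assuming the visualSLAM graph and initialEstimate from that test (variable names illustrative):

// Sketch: if no Ordering is given, the optimizer computes one with COLAMD
// internally, replacing the old graph->orderingCOLAMD(*values) idiom.
LevenbergMarquardtOptimizer automatic(graph, initialEstimate);

// An explicit ordering can still be supplied when the caller wants control:
Ordering ordering;
ordering += PointKey(1), PointKey(2), PoseKey(1), PoseKey(2);
LevenbergMarquardtOptimizer manual(graph, initialEstimate, ordering);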
@ -16,7 +16,6 @@
*/

#include <gtsam/slam/visualSLAM.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

namespace gtsam {
@ -145,7 +145,4 @@ namespace visualSLAM {
}; // Graph

/// typedef for Optimizer. The current default will use the multi-frontal solver
typedef NonlinearOptimizer<Graph> Optimizer;

} // namespaces
@ -19,7 +19,7 @@
#include <gtsam/slam/simulated2DConstraints.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

namespace iq2D = simulated2D::inequality_constraints;
using namespace std;
@ -31,11 +31,6 @@ SharedDiagonal soft_model2 = noiseModel::Unit::Create(2);
SharedDiagonal soft_model2_alt = noiseModel::Isotropic::Sigma(2, 0.1);
SharedDiagonal hard_model1 = noiseModel::Constrained::All(1);

typedef NonlinearFactorGraph Graph;
typedef boost::shared_ptr<Graph> shared_graph;
typedef boost::shared_ptr<Values> shared_values;
typedef NonlinearOptimizer<Graph> Optimizer;

// some simple inequality constraints
Symbol key(simulated2D::PoseKey(1));
double mu = 10.0;
@ -150,19 +145,19 @@ TEST( testBoundingConstraint, unary_simple_optimization1) {
Point2 goal_pt(1.0, 2.0);
Point2 start_pt(0.0, 1.0);

shared_graph graph(new Graph());
NonlinearFactorGraph graph;
Symbol x1('x',1);
graph->add(iq2D::PoseXInequality(x1, 1.0, true));
graph->add(iq2D::PoseYInequality(x1, 2.0, true));
graph->add(simulated2D::Prior(start_pt, soft_model2, x1));
graph.add(iq2D::PoseXInequality(x1, 1.0, true));
graph.add(iq2D::PoseYInequality(x1, 2.0, true));
graph.add(simulated2D::Prior(start_pt, soft_model2, x1));

shared_values initValues(new Values());
initValues->insert(x1, start_pt);
Values initValues;
initValues.insert(x1, start_pt);

Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues);
Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();
Values expected;
expected.insert(x1, goal_pt);
CHECK(assert_equal(expected, *actual, tol));
CHECK(assert_equal(expected, actual, tol));
}

/* ************************************************************************* */
@ -172,19 +167,19 @@ TEST( testBoundingConstraint, unary_simple_optimization2) {
Point2 goal_pt(1.0, 2.0);
Point2 start_pt(2.0, 3.0);

shared_graph graph(new Graph());
NonlinearFactorGraph graph;
Symbol x1('x',1);
graph->add(iq2D::PoseXInequality(x1, 1.0, false));
graph->add(iq2D::PoseYInequality(x1, 2.0, false));
graph->add(simulated2D::Prior(start_pt, soft_model2, x1));
graph.add(iq2D::PoseXInequality(x1, 1.0, false));
graph.add(iq2D::PoseYInequality(x1, 2.0, false));
graph.add(simulated2D::Prior(start_pt, soft_model2, x1));

shared_values initValues(new Values());
initValues->insert(x1, start_pt);
Values initValues;
initValues.insert(x1, start_pt);

Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues);
Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();
Values expected;
expected.insert(x1, goal_pt);
CHECK(assert_equal(expected, *actual, tol));
CHECK(assert_equal(expected, actual, tol));
}

/* ************************************************************************* */
@ -233,7 +228,7 @@ TEST( testBoundingConstraint, MaxDistance_simple_optimization) {
Point2 pt1, pt2_init(5.0, 0.0), pt2_goal(2.0, 0.0);
Symbol x1('x',1), x2('x',2);

Graph graph;
NonlinearFactorGraph graph;
graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(pt1, x1));
graph.add(simulated2D::Prior(pt2_init, soft_model2_alt, x2));
graph.add(iq2D::PoseMaxDistConstraint(x1, x2, 2.0));
@ -259,7 +254,7 @@ TEST( testBoundingConstraint, avoid_demo) {
Point2 x1_pt, x2_init(2.0, 0.5), x2_goal(2.0, 1.0), x3_pt(4.0, 0.0), l1_pt(2.0, 0.0);
Point2 odo(2.0, 0.0);

Graph graph;
NonlinearFactorGraph graph;
graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(x1_pt, x1));
graph.add(simulated2D::Odometry(odo, soft_model2_alt, x1, x2));
graph.add(iq2D::LandmarkAvoid(x2, l1, radius));
@ -18,6 +18,7 @@
#include <CppUnitLite/TestHarness.h>

#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianMultifrontalSolver.h>
#include <gtsam/slam/smallExample.h>
#include <gtsam/slam/planarSLAM.h>
@ -0,0 +1,187 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testMarginals.cpp
 * @brief Tests for computing marginal covariances with the Marginals class
 * @author Richard Roberts
 * @date May 14, 2012
 */
#include <CppUnitLite/TestHarness.h>

// for all nonlinear keys
#include <gtsam/nonlinear/Symbol.h>

// for points and poses
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Pose2.h>

// for modeling measurement uncertainty - all models included here
#include <gtsam/linear/NoiseModel.h>

// add in headers for specific factors
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/BearingRangeFactor.h>

#include <gtsam/nonlinear/Marginals.h>

using namespace std;
using namespace gtsam;

TEST(Marginals, planarSLAMmarginals) {

// Taken from PlanarSLAMSelfContained_advanced

// create keys for variables
Symbol x1('x',1), x2('x',2), x3('x',3);
Symbol l1('l',1), l2('l',2);

// create graph container and add factors to it
NonlinearFactorGraph graph;

/* add prior */
// gaussian for prior
SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1));
Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin
graph.add(PriorFactor<Pose2>(x1, prior_measurement, prior_model)); // add the factor to the graph

/* add odometry */
// general noisemodel for odometry
SharedDiagonal odom_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1));
Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case)
// create between factors to represent odometry
graph.add(BetweenFactor<Pose2>(x1, x2, odom_measurement, odom_model));
graph.add(BetweenFactor<Pose2>(x2, x3, odom_measurement, odom_model));

/* add measurements */
// general noisemodel for measurements
SharedDiagonal meas_model = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.2));

// create the measurement values - indices are (pose id, landmark id)
Rot2 bearing11 = Rot2::fromDegrees(45),
bearing21 = Rot2::fromDegrees(90),
bearing32 = Rot2::fromDegrees(90);
double range11 = sqrt(4+4),
range21 = 2.0,
range32 = 2.0;

// create bearing/range factors
graph.add(BearingRangeFactor<Pose2, Point2>(x1, l1, bearing11, range11, meas_model));
graph.add(BearingRangeFactor<Pose2, Point2>(x2, l1, bearing21, range21, meas_model));
graph.add(BearingRangeFactor<Pose2, Point2>(x3, l2, bearing32, range32, meas_model));

// linearization point for marginals
Values soln;
soln.insert(x1, Pose2(0.0, 0.0, 0.0));
soln.insert(x2, Pose2(2.0, 0.0, 0.0));
soln.insert(x3, Pose2(4.0, 0.0, 0.0));
soln.insert(l1, Point2(2.0, 2.0));
soln.insert(l2, Point2(4.0, 2.0));

Matrix expectedx1(3,3);
expectedx1 <<
0.09, -7.1942452e-18, -1.27897692e-17,
-7.1942452e-18, 0.09, 1.27897692e-17,
-1.27897692e-17, 1.27897692e-17, 0.01;
Matrix expectedx2(3,3);
expectedx2 <<
0.120967742, -0.00129032258, 0.00451612903,
-0.00129032258, 0.158387097, 0.0206451613,
0.00451612903, 0.0206451613, 0.0177419355;
Matrix expectedx3(3,3);
expectedx3 <<
0.160967742, 0.00774193548, 0.00451612903,
0.00774193548, 0.351935484, 0.0561290323,
0.00451612903, 0.0561290323, 0.0277419355;
Matrix expectedl1(2,2);
expectedl1 <<
0.168709677, -0.0477419355,
-0.0477419355, 0.163548387;
Matrix expectedl2(2,2);
expectedl2 <<
0.293870968, -0.104516129,
-0.104516129, 0.391935484;

// Check marginals covariances for all variables (Cholesky mode)
Marginals marginals(graph, soln, Marginals::CHOLESKY);
EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8));
EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8));
EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8));
EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8));
EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8));

// Check marginals covariances for all variables (QR mode)
marginals = Marginals(graph, soln, Marginals::QR);
EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8));
EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8));
EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8));
EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8));
EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8));

// Check joint marginals for 3 variables
Matrix expected_l2x1x3(8,8);
expected_l2x1x3 <<
0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, 0.151935669757191, -0.104516127560770, -0.050967744878460,
-0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193,
0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.090000180000270, 0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, -0.000000000000000, 0.090000180000270, 0.000000000000000,
-0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.000000000000000, 0.040000000000000, 0.010000000000000,
0.151935669757191, 0.007741936219615, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.160967924878730, 0.007741936219615, 0.004516127560770,
-0.104516127560770, 0.351935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193,
-0.050967744878460, 0.056129031890193, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.004516127560770, 0.056129031890193, 0.027741936219615;
vector<Key> variables(3);
variables[0] = l2;
variables[1] = x1;
variables[2] = x3;
JointMarginal joint_l2x1x3 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,0,2,2)), Matrix(joint_l2x1x3(l2,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,0,3,2)), Matrix(joint_l2x1x3(x1,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,0,3,2)), Matrix(joint_l2x1x3(x3,l2)), 1e-6));

EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,2,2,3)), Matrix(joint_l2x1x3(l2,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,2,3,3)), Matrix(joint_l2x1x3(x1,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,2,3,3)), Matrix(joint_l2x1x3(x3,x1)), 1e-6));

EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,5,2,3)), Matrix(joint_l2x1x3(l2,x3)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,5,3,3)), Matrix(joint_l2x1x3(x1,x3)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,5,3,3)), Matrix(joint_l2x1x3(x3,x3)), 1e-6));

// Check joint marginals for 2 variables (different code path than >2 variable case above)
Matrix expected_l2x1(5,5);
expected_l2x1 <<
0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000,
-0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000,
0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000,
-0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000;
variables.resize(2);
variables[0] = l2;
variables[1] = x1;
JointMarginal joint_l2x1 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(Matrix(expected_l2x1.block(0,0,2,2)), Matrix(joint_l2x1(l2,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(2,0,3,2)), Matrix(joint_l2x1(x1,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(0,2,2,3)), Matrix(joint_l2x1(l2,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(2,2,3,3)), Matrix(joint_l2x1(x1,x1)), 1e-6));
// Check joint marginal for 1 variable (different code path than >1 variable cases above)
variables.resize(1);
variables[0] = x1;
JointMarginal joint_x1 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(expectedx1, Matrix(joint_x1(x1,x1)), 1e-6));
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */
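The pattern the new Marginals class supports can be distilled to a few lines. A minimal sketch, assuming the graph and soln built in testMarginals.cpp above (variable names illustrative):

// Sketch: Marginals factorizes the linearized graph once; each query then
// extracts the requested block of the inverse information matrix.
Marginals marginals(graph, soln);                 // CHOLESKY is the default mode
Matrix Px1 = marginals.marginalCovariance(x1);    // 3x3 covariance of pose x1

std::vector<Key> vars;
vars.push_back(x1);
vars.push_back(l1);
JointMarginal joint = marginals.jointMarginalCovariance(vars);
Matrix Px1l1 = joint(x1, l1);                     // 3x2 cross-covariance block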
@ -22,7 +22,7 @@
#include <gtsam/slam/visualSLAM.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer-inl.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

using namespace std;
using namespace gtsam;
@ -35,9 +35,6 @@ typedef PriorFactor<Pose2> PosePrior;
typedef NonlinearEquality<Pose2> PoseNLE;
typedef boost::shared_ptr<PoseNLE> shared_poseNLE;

typedef NonlinearFactorGraph PoseGraph;
typedef NonlinearOptimizer<PoseGraph> PoseOptimizer;

Symbol key('x',1);

/* ************************************************************************* */
@ -188,25 +185,23 @@ TEST ( NonlinearEquality, allow_error_optimize ) {
PoseNLE nle(key1, feasible1, error_gain);

// add to a graph
boost::shared_ptr<PoseGraph> graph(new PoseGraph());
graph->add(nle);
NonlinearFactorGraph graph;
graph.add(nle);

// initialize away from the ideal
Pose2 initPose(0.0, 2.0, 3.0);
boost::shared_ptr<Values> init(new Values());
init->insert(key1, initPose);
Values init;
init.insert(key1, initPose);

// optimize
boost::shared_ptr<Ordering> ord(new Ordering());
ord->push_back(key1);
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-5, 1e-5);
PoseOptimizer optimizer(graph, init, ord, params);
PoseOptimizer result = optimizer.levenbergMarquardt();
Ordering ordering;
ordering.push_back(key1);
Values result = LevenbergMarquardtOptimizer(graph, init, ordering).optimize();

// verify
Values expected;
expected.insert(key1, feasible1);
EXPECT(assert_equal(expected, *result.values()));
EXPECT(assert_equal(expected, result));
}

/* ************************************************************************* */
@ -217,9 +212,9 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) {
Pose2 feasible1(1.0, 2.0, 3.0);

// initialize away from the ideal
boost::shared_ptr<Values> init(new Values());
Values init;
Pose2 initPose(0.0, 2.0, 3.0);
init->insert(key1, initPose);
init.insert(key1, initPose);

double error_gain = 500.0;
PoseNLE nle(key1, feasible1, error_gain);
@ -228,32 +223,25 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) {
PosePrior prior(key1, initPose, noiseModel::Isotropic::Sigma(3, 0.1));

// add to a graph
boost::shared_ptr<PoseGraph> graph(new PoseGraph());
graph->add(nle);
graph->add(prior);
NonlinearFactorGraph graph;
graph.add(nle);
graph.add(prior);

// optimize
boost::shared_ptr<Ordering> ord(new Ordering());
ord->push_back(key1);
NonlinearOptimizationParameters::shared_ptr params = NonlinearOptimizationParameters::newDecreaseThresholds(1e-5, 1e-5);
PoseOptimizer optimizer(graph, init, ord, params);
PoseOptimizer result = optimizer.levenbergMarquardt();
Ordering ordering;
ordering.push_back(key1);
Values actual = LevenbergMarquardtOptimizer(graph, init, ordering).optimize();

// verify
Values expected;
expected.insert(key1, feasible1);
EXPECT(assert_equal(expected, *result.values()));
EXPECT(assert_equal(expected, actual));
}

/* ************************************************************************* */
SharedDiagonal hard_model = noiseModel::Constrained::All(2);
SharedDiagonal soft_model = noiseModel::Isotropic::Sigma(2, 1.0);

typedef NonlinearFactorGraph Graph;
typedef boost::shared_ptr<Graph> shared_graph;
typedef boost::shared_ptr<Values> shared_values;
typedef NonlinearOptimizer<Graph> Optimizer;

/* ************************************************************************* */
TEST( testNonlinearEqualityConstraint, unary_basics ) {
Point2 pt(1.0, 2.0);
@ -314,23 +302,23 @@ TEST( testNonlinearEqualityConstraint, unary_simple_optimization ) {
simulated2D::Prior::shared_ptr factor(
new simulated2D::Prior(badPt, soft_model, key));

shared_graph graph(new Graph());
graph->push_back(constraint);
graph->push_back(factor);
NonlinearFactorGraph graph;
graph.push_back(constraint);
graph.push_back(factor);

shared_values initValues(new Values());
initValues->insert(key, badPt);
Values initValues;
initValues.insert(key, badPt);

// verify error values
EXPECT(constraint->active(*initValues));
EXPECT(constraint->active(initValues));

Values expected;
expected.insert(key, truth_pt);
EXPECT(constraint->active(expected));
EXPECT_DOUBLES_EQUAL(0.0, constraint->error(expected), tol);

Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues);
EXPECT(assert_equal(expected, *actual, tol));
Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();
EXPECT(assert_equal(expected, actual, tol));
}

/* ************************************************************************* */
@ -411,20 +399,20 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) {
new eq2D::OdoEqualityConstraint(
truth_pt1.between(truth_pt2), key1, key2));

shared_graph graph(new Graph());
graph->push_back(constraint1);
graph->push_back(constraint2);
graph->push_back(factor);
NonlinearFactorGraph graph;
graph.push_back(constraint1);
graph.push_back(constraint2);
graph.push_back(factor);

shared_values initValues(new Values());
initValues->insert(key1, Point2());
initValues->insert(key2, badPt);
Values initValues;
initValues.insert(key1, Point2());
initValues.insert(key2, badPt);

Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues);
Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();
Values expected;
expected.insert(key1, truth_pt1);
expected.insert(key2, truth_pt2);
CHECK(assert_equal(expected, *actual, tol));
CHECK(assert_equal(expected, actual, tol));
}

/* ********************************************************************* */
@ -435,44 +423,44 @@ TEST (testNonlinearEqualityConstraint, two_pose ) {
* constrained to a particular value
*/

shared_graph graph(new Graph());
NonlinearFactorGraph graph;

Symbol x1('x',1), x2('x',2);
Symbol l1('l',1), l2('l',2);
Point2 pt_x1(1.0, 1.0),
pt_x2(5.0, 6.0);
graph->add(eq2D::UnaryEqualityConstraint(pt_x1, x1));
graph->add(eq2D::UnaryEqualityConstraint(pt_x2, x2));
graph.add(eq2D::UnaryEqualityConstraint(pt_x1, x1));
graph.add(eq2D::UnaryEqualityConstraint(pt_x2, x2));

Point2 z1(0.0, 5.0);
SharedNoiseModel sigma(noiseModel::Isotropic::Sigma(2, 0.1));
graph->add(simulated2D::Measurement(z1, sigma, x1,l1));
graph.add(simulated2D::Measurement(z1, sigma, x1,l1));

Point2 z2(-4.0, 0.0);
graph->add(simulated2D::Measurement(z2, sigma, x2,l2));
graph.add(simulated2D::Measurement(z2, sigma, x2,l2));

graph->add(eq2D::PointEqualityConstraint(l1, l2));
graph.add(eq2D::PointEqualityConstraint(l1, l2));

shared_values initialEstimate(new Values());
initialEstimate->insert(x1, pt_x1);
initialEstimate->insert(x2, Point2());
initialEstimate->insert(l1, Point2(1.0, 6.0)); // ground truth
initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame
Values initialEstimate;
initialEstimate.insert(x1, pt_x1);
initialEstimate.insert(x2, Point2());
initialEstimate.insert(l1, Point2(1.0, 6.0)); // ground truth
initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame

Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate);
Values actual = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();

Values expected;
expected.insert(x1, pt_x1);
expected.insert(l1, Point2(1.0, 6.0));
expected.insert(l2, Point2(1.0, 6.0));
expected.insert(x2, Point2(5.0, 6.0));
CHECK(assert_equal(expected, *actual, 1e-5));
CHECK(assert_equal(expected, actual, 1e-5));
}

/* ********************************************************************* */
TEST (testNonlinearEqualityConstraint, map_warp ) {
// get a graph
shared_graph graph(new Graph());
NonlinearFactorGraph graph;

// keys
Symbol x1('x',1), x2('x',2);
@ -480,37 +468,37 @@ TEST (testNonlinearEqualityConstraint, map_warp ) {
// constant constraint on x1
Point2 pose1(1.0, 1.0);
graph->add(eq2D::UnaryEqualityConstraint(pose1, x1));
graph.add(eq2D::UnaryEqualityConstraint(pose1, x1));

SharedDiagonal sigma = noiseModel::Isotropic::Sigma(1,0.1);

// measurement from x1 to l1
Point2 z1(0.0, 5.0);
graph->add(simulated2D::Measurement(z1, sigma, x1, l1));
graph.add(simulated2D::Measurement(z1, sigma, x1, l1));

// measurement from x2 to l2
Point2 z2(-4.0, 0.0);
graph->add(simulated2D::Measurement(z2, sigma, x2, l2));
graph.add(simulated2D::Measurement(z2, sigma, x2, l2));

// equality constraint between l1 and l2
graph->add(eq2D::PointEqualityConstraint(l1, l2));
graph.add(eq2D::PointEqualityConstraint(l1, l2));

// create an initial estimate
shared_values initialEstimate(new Values());
initialEstimate->insert(x1, Point2( 1.0, 1.0));
initialEstimate->insert(l1, Point2( 1.0, 6.0));
initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame
initialEstimate->insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin
Values initialEstimate;
initialEstimate.insert(x1, Point2( 1.0, 1.0));
initialEstimate.insert(l1, Point2( 1.0, 6.0));
initialEstimate.insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame
initialEstimate.insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin

// optimize
Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate);
Values actual = LevenbergMarquardtOptimizer(graph, initialEstimate).optimize();

Values expected;
expected.insert(x1, Point2(1.0, 1.0));
expected.insert(l1, Point2(1.0, 6.0));
expected.insert(l2, Point2(1.0, 6.0));
expected.insert(x2, Point2(5.0, 6.0));
CHECK(assert_equal(expected, *actual, tol));
CHECK(assert_equal(expected, actual, tol));
}

// make a realistic calibration matrix
@ -520,9 +508,7 @@ Cal3_S2 K(fov,w,h);
boost::shared_ptr<Cal3_S2> shK(new Cal3_S2(K));

// typedefs for visual SLAM example
typedef boost::shared_ptr<Values> shared_vconfig;
typedef visualSLAM::Graph VGraph;
typedef NonlinearOptimizer<VGraph> VOptimizer;

// factors for visual slam
typedef NonlinearEquality2<Point3> Point3Equality;
@ -546,32 +532,32 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) {
Symbol l1('l',1), l2('l',2);

// create graph
VGraph::shared_graph graph(new VGraph());
VGraph graph;

// create equality constraints for poses
graph->addPoseConstraint(1, camera1.pose());
graph->addPoseConstraint(2, camera2.pose());
graph.addPoseConstraint(1, camera1.pose());
graph.addPoseConstraint(2, camera2.pose());

// create factors
SharedDiagonal vmodel = noiseModel::Unit::Create(3);
graph->addMeasurement(camera1.project(landmark), vmodel, 1, 1, shK);
graph->addMeasurement(camera2.project(landmark), vmodel, 2, 2, shK);
graph.addMeasurement(camera1.project(landmark), vmodel, 1, 1, shK);
graph.addMeasurement(camera2.project(landmark), vmodel, 2, 2, shK);

// add equality constraint
graph->add(Point3Equality(l1, l2));
graph.add(Point3Equality(l1, l2));

// create initial data
Point3 landmark1(0.5, 5.0, 0.0);
Point3 landmark2(1.5, 5.0, 0.0);

shared_vconfig initValues(new Values());
initValues->insert(x1, pose1);
initValues->insert(x2, pose2);
initValues->insert(l1, landmark1);
initValues->insert(l2, landmark2);
Values initValues;
initValues.insert(x1, pose1);
initValues.insert(x2, pose2);
initValues.insert(l1, landmark1);
initValues.insert(l2, landmark2);

// optimize
VOptimizer::shared_values actual = VOptimizer::optimizeLM(graph, initValues);
Values actual = LevenbergMarquardtOptimizer(graph, initValues).optimize();

// create config
Values truthValues;
@ -581,7 +567,7 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) {
truthValues.insert(l2, landmark);

// check if correct
CHECK(assert_equal(truthValues, *actual, 1e-5));
CHECK(assert_equal(truthValues, actual, 1e-5));
}

/* ************************************************************************* */
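The constraint tests above all exercise one pattern: a hard equality factor competing with soft measurement factors. A minimal sketch in isolation, assuming the eq2D/simulated2D helpers from simulated2DConstraints.h used in these tests (variable names illustrative):

// Sketch: a NonlinearEquality-style factor pins x1 exactly, while a soft
// prior pulls it elsewhere; the optimizer must satisfy the constraint and
// absorb the prior's residual error.
NonlinearFactorGraph graph;
Symbol x1('x',1);
graph.add(eq2D::UnaryEqualityConstraint(Point2(1.0, 1.0), x1));   // hard constraint
graph.add(simulated2D::Prior(Point2(3.0, 3.0), noiseModel::Isotropic::Sigma(2, 0.1), x1)); // soft prior

Values init;
init.insert(x1, Point2(0.0, 0.0));
Values result = LevenbergMarquardtOptimizer(graph, init).optimize();
// result.at<Point2>(x1) is (1.0, 1.0): the equality constraint dominates the soft prior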
@ -31,11 +31,9 @@ using namespace boost;
#include <gtsam/slam/pose2SLAM.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>

// template definitions
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/nonlinear/NonlinearOptimization.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/DoglegOptimizer.h>

using namespace gtsam;
@ -44,337 +42,219 @@ const double tol = 1e-5;
Key kx(size_t i) { return Symbol('x',i); }
Key kl(size_t i) { return Symbol('l',i); }

typedef NonlinearOptimizer<example::Graph> Optimizer;

/* ************************************************************************* */
TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta )
{
shared_ptr<example::Graph> fg(new example::Graph(
example::createNonlinearFactorGraph()));
Optimizer::shared_values initial = example::sharedNoisyValues();

// Expected values structure is the difference between the noisy config
// and the ground-truth config. One step only because it's linear !
Ordering ord1; ord1 += kx(2),kl(1),kx(1);
VectorValues expected(initial->dims(ord1));
Vector dl1(2);
dl1(0) = -0.1;
dl1(1) = 0.1;
expected[ord1[kl(1)]] = dl1;
Vector dx1(2);
dx1(0) = -0.1;
dx1(1) = -0.1;
expected[ord1[kx(1)]] = dx1;
Vector dx2(2);
dx2(0) = 0.1;
dx2(1) = -0.2;
expected[ord1[kx(2)]] = dx2;

// Check one ordering
Optimizer optimizer1(fg, initial, Optimizer::shared_ordering(new Ordering(ord1)));

VectorValues actual1 = optimizer1.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual1,expected));

// SL-FIX // Check another
// shared_ptr<Ordering> ord2(new Ordering());
// *ord2 += kx(1),kx(2),kl(1);
// solver = Optimizer::shared_solver(new Optimizer::solver(ord2));
// Optimizer optimizer2(fg, initial, solver);
//
// VectorValues actual2 = optimizer2.linearizeAndOptimizeForDelta();
// CHECK(assert_equal(actual2,expected));
//
// // And yet another...
// shared_ptr<Ordering> ord3(new Ordering());
// *ord3 += kl(1),kx(1),kx(2);
// solver = Optimizer::shared_solver(new Optimizer::solver(ord3));
// Optimizer optimizer3(fg, initial, solver);
//
// VectorValues actual3 = optimizer3.linearizeAndOptimizeForDelta();
// CHECK(assert_equal(actual3,expected));
//
// // More...
// shared_ptr<Ordering> ord4(new Ordering());
// *ord4 += kx(1),kx(2), kl(1);
// solver = Optimizer::shared_solver(new Optimizer::solver(ord4));
// Optimizer optimizer4(fg, initial, solver);
//
// VectorValues actual4 = optimizer4.linearizeAndOptimizeForDelta();
// CHECK(assert_equal(actual4,expected));
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, iterateLM )
{
// really non-linear factor graph
shared_ptr<example::Graph> fg(new example::Graph(
example::createReallyNonlinearFactorGraph()));
example::Graph fg(example::createReallyNonlinearFactorGraph());

// config far from minimum
Point2 x0(3,0);
boost::shared_ptr<Values> config(new Values);
config->insert(simulated2D::PoseKey(1), x0);

// ordering
shared_ptr<Ordering> ord(new Ordering());
ord->push_back(kx(1));

// create initial optimization state, with lambda=0
Optimizer optimizer(fg, config, ord, NonlinearOptimizationParameters::newLambda(0.));
Values config;
config.insert(simulated2D::PoseKey(1), x0);

// normal iterate
Optimizer iterated1 = optimizer.iterate();
GaussNewtonParams gnParams;
GaussNewtonOptimizer gnOptimizer(fg, config, gnParams);
gnOptimizer.iterate();

// LM iterate with lambda 0 should be the same
Optimizer iterated2 = optimizer.iterateLM();
LevenbergMarquardtParams lmParams;
lmParams.lambdaInitial = 0.0;
LevenbergMarquardtOptimizer lmOptimizer(fg, config, lmParams);
lmOptimizer.iterate();

// Try successive iterates. TODO: ugly pointers, better way ?
Optimizer *pointer = new Optimizer(iterated2);
for (int i=0;i<10;i++) {
Optimizer* newOptimizer = new Optimizer(pointer->iterateLM());
delete pointer;
pointer = newOptimizer;
}
delete(pointer);

CHECK(assert_equal(*iterated1.values(), *iterated2.values(), 1e-9));
CHECK(assert_equal(gnOptimizer.values(), lmOptimizer.values(), 1e-9));
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, optimize )
{
shared_ptr<example::Graph> fg(new example::Graph(
example::createReallyNonlinearFactorGraph()));
example::Graph fg(example::createReallyNonlinearFactorGraph());

// test error at minimum
Point2 xstar(0,0);
Values cstar;
cstar.insert(simulated2D::PoseKey(1), xstar);
DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);

// test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
Point2 x0(3,3);
boost::shared_ptr<Values> c0(new Values);
c0->insert(simulated2D::PoseKey(1), x0);
DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
Values c0;
c0.insert(simulated2D::PoseKey(1), x0);
DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);

// optimize parameters
shared_ptr<Ordering> ord(new Ordering());
ord->push_back(kx(1));

// initial optimization state is the same in both cases tested
boost::shared_ptr<NonlinearOptimizationParameters> params = boost::make_shared<NonlinearOptimizationParameters>();
params->relDecrease_ = 1e-5;
params->absDecrease_ = 1e-5;
Optimizer optimizer(fg, c0, ord, params);
Ordering ord;
ord.push_back(kx(1));

// Gauss-Newton
Optimizer actual1 = optimizer.gaussNewton();
DOUBLES_EQUAL(0,fg->error(*(actual1.values())),tol);
GaussNewtonParams gnParams;
gnParams.ordering = ord;
Values actual1 = GaussNewtonOptimizer(fg, c0, gnParams).optimize();
DOUBLES_EQUAL(0,fg.error(actual1),tol);

// Levenberg-Marquardt
Optimizer actual2 = optimizer.levenbergMarquardt();
DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
LevenbergMarquardtParams lmParams;
lmParams.ordering = ord;
Values actual2 = LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize();
DOUBLES_EQUAL(0,fg.error(actual2),tol);

// Dogleg
DoglegParams dlParams;
dlParams.ordering = ord;
Values actual3 = DoglegOptimizer(fg, c0, dlParams).optimize();
DOUBLES_EQUAL(0,fg.error(actual3),tol);
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, SimpleLMOptimizer )
{
shared_ptr<example::Graph> fg(new example::Graph(
example::createReallyNonlinearFactorGraph()));

Point2 x0(3,3);
boost::shared_ptr<Values> c0(new Values);
c0->insert(simulated2D::PoseKey(1), x0);

Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0);
DOUBLES_EQUAL(0,fg->error(*actual),tol);
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, SimpleLMOptimizer_noshared )
{
example::Graph fg = example::createReallyNonlinearFactorGraph();
example::Graph fg(example::createReallyNonlinearFactorGraph());

Point2 x0(3,3);
Values c0;
c0.insert(simulated2D::PoseKey(1), x0);

Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0);
DOUBLES_EQUAL(0,fg.error(*actual),tol);
Values actual = LevenbergMarquardtOptimizer(fg, c0).optimize();
DOUBLES_EQUAL(0,fg.error(actual),tol);
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, SimpleGNOptimizer )
{
shared_ptr<example::Graph> fg(new example::Graph(
example::createReallyNonlinearFactorGraph()));
example::Graph fg(example::createReallyNonlinearFactorGraph());

Point2 x0(3,3);
boost::shared_ptr<Values> c0(new Values);
c0->insert(simulated2D::PoseKey(1), x0);
Point2 x0(3,3);
Values c0;
c0.insert(simulated2D::PoseKey(1), x0);

Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0);
DOUBLES_EQUAL(0,fg->error(*actual),tol);
Values actual = GaussNewtonOptimizer(fg, c0).optimize();
DOUBLES_EQUAL(0,fg.error(actual),tol);
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared )
TEST( NonlinearOptimizer, SimpleDLOptimizer )
{
example::Graph fg = example::createReallyNonlinearFactorGraph();
example::Graph fg(example::createReallyNonlinearFactorGraph());

Point2 x0(3,3);
Values c0;
c0.insert(simulated2D::PoseKey(1), x0);
Point2 x0(3,3);
Values c0;
c0.insert(simulated2D::PoseKey(1), x0);

Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0);
DOUBLES_EQUAL(0,fg.error(*actual),tol);
Values actual = DoglegOptimizer(fg, c0).optimize();
DOUBLES_EQUAL(0,fg.error(actual),tol);
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, optimization_method )
{
LevenbergMarquardtParams paramsQR;
paramsQR.factorization = LevenbergMarquardtParams::QR;
LevenbergMarquardtParams paramsLDL;
paramsLDL.factorization = LevenbergMarquardtParams::LDL;
LevenbergMarquardtParams paramsChol;
paramsChol.factorization = LevenbergMarquardtParams::CHOLESKY;
|
||||
|
||||
example::Graph fg = example::createReallyNonlinearFactorGraph();
|
||||
|
||||
Point2 x0(3,3);
|
||||
Values c0;
|
||||
c0.insert(simulated2D::PoseKey(1), x0);
|
||||
|
||||
Values actualMFQR = optimize<example::Graph>(
|
||||
fg, c0, *NonlinearOptimizationParameters().newFactorization(true), MULTIFRONTAL, LM);
|
||||
Values actualMFQR = LevenbergMarquardtOptimizer(fg, c0, paramsQR).optimize();
|
||||
DOUBLES_EQUAL(0,fg.error(actualMFQR),tol);
|
||||
|
||||
Values actualMFLDL = optimize<example::Graph>(
|
||||
fg, c0, *NonlinearOptimizationParameters().newFactorization(false), MULTIFRONTAL, LM);
|
||||
Values actualMFLDL = LevenbergMarquardtOptimizer(fg, c0, paramsLDL).optimize();
|
||||
DOUBLES_EQUAL(0,fg.error(actualMFLDL),tol);
|
||||
|
||||
Values actualMFChol = LevenbergMarquardtOptimizer(fg, c0, paramsChol).optimize();
|
||||
DOUBLES_EQUAL(0,fg.error(actualMFChol),tol);
|
||||
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, Factorization )
{
  typedef NonlinearOptimizer<pose2SLAM::Graph, GaussianFactorGraph, GaussianSequentialSolver > Optimizer;

  boost::shared_ptr<Values> config(new Values);
  config->insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.));
  config->insert(pose2SLAM::PoseKey(2), Pose2(1.5,0.,0.));
  Values config;
  config.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.));
  config.insert(pose2SLAM::PoseKey(2), Pose2(1.5,0.,0.));

  boost::shared_ptr<pose2SLAM::Graph> graph(new pose2SLAM::Graph);
  graph->addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10));
  graph->addOdometry(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1));
  pose2SLAM::Graph graph;
  graph.addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10));
  graph.addOdometry(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1));

  boost::shared_ptr<Ordering> ordering(new Ordering);
  ordering->push_back(pose2SLAM::PoseKey(1));
  ordering->push_back(pose2SLAM::PoseKey(2));
  Ordering ordering;
  ordering.push_back(pose2SLAM::PoseKey(1));
  ordering.push_back(pose2SLAM::PoseKey(2));

  Optimizer optimizer(graph, config, ordering);
  Optimizer optimized = optimizer.iterateLM();
  LevenbergMarquardtOptimizer optimizer(graph, config, ordering);
  optimizer.iterate();

  Values expected;
  expected.insert(pose2SLAM::PoseKey(1), Pose2(0.,0.,0.));
  expected.insert(pose2SLAM::PoseKey(2), Pose2(1.,0.,0.));
  CHECK(assert_equal(expected, *optimized.values(), 1e-5));
  CHECK(assert_equal(expected, optimizer.values(), 1e-5));
}
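
Unlike optimize(), iterate() performs a single step, which is what lets this test check the state after exactly one LM update. A minimal sketch of driving the optimizer by hand, assuming an error() accessor alongside the iterate() and values() calls used above:

    LevenbergMarquardtOptimizer opt(graph, config, ordering);
    double previousError = opt.error();
    for (size_t i = 0; i < 10; ++i) {
      opt.iterate();                                  // one LM step
      if (previousError - opt.error() < 1e-9) break;  // stop when the error plateaus
      previousError = opt.error();
    }
    Values result = opt.values();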

/* ************************************************************************* */
TEST_UNSAFE(NonlinearOptimizer, NullFactor) {
TEST(NonlinearOptimizer, NullFactor) {

  shared_ptr<example::Graph> fg(new example::Graph(
    example::createReallyNonlinearFactorGraph()));
  example::Graph fg = example::createReallyNonlinearFactorGraph();

  // Add null factor (an empty shared pointer, which the graph must tolerate)
  fg->push_back(example::Graph::sharedFactor());
  fg.push_back(example::Graph::sharedFactor());

  // test error at minimum
  Point2 xstar(0,0);
  Values cstar;
  cstar.insert(simulated2D::PoseKey(1), xstar);
  DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
  DOUBLES_EQUAL(0.0,fg.error(cstar),0.0);

  // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = 199.0
  Point2 x0(3,3);
  boost::shared_ptr<Values> c0(new Values);
  c0->insert(simulated2D::PoseKey(1), x0);
  DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
  Values c0;
  c0.insert(simulated2D::PoseKey(1), x0);
  DOUBLES_EQUAL(199.0,fg.error(c0),1e-3);

  // optimize parameters
  shared_ptr<Ordering> ord(new Ordering());
  ord->push_back(kx(1));

  // initial optimization state is the same in both cases tested
  boost::shared_ptr<NonlinearOptimizationParameters> params = boost::make_shared<NonlinearOptimizationParameters>();
  params->relDecrease_ = 1e-5;
  params->absDecrease_ = 1e-5;
  Optimizer optimizer(fg, c0, ord, params);
  Ordering ord;
  ord.push_back(kx(1));

  // Gauss-Newton
  Optimizer actual1 = optimizer.gaussNewton();
  DOUBLES_EQUAL(0,fg->error(*(actual1.values())),tol);
  Values actual1 = GaussNewtonOptimizer(fg, c0, ord).optimize();
  DOUBLES_EQUAL(0,fg.error(actual1),tol);

  // Levenberg-Marquardt
  Optimizer actual2 = optimizer.levenbergMarquardt();
  DOUBLES_EQUAL(0,fg->error(*(actual2.values())),tol);
  Values actual2 = LevenbergMarquardtOptimizer(fg, c0, ord).optimize();
  DOUBLES_EQUAL(0,fg.error(actual2),tol);

  // Dogleg
  Values actual3 = DoglegOptimizer(fg, c0, ord).optimize();
  DOUBLES_EQUAL(0,fg.error(actual3),tol);
}

///* ************************************************************************* */
// SL-FIX TEST( NonlinearOptimizer, SubgraphSolver )
//{
//  using namespace pose2SLAM;
//  typedef SubgraphSolver<Graph, Values> Solver;
//  typedef NonlinearOptimizer<Graph, Values, SubgraphPreconditioner, Solver> Optimizer;
//
//  // Create a graph
//  boost::shared_ptr<Graph> graph(new Graph);
//  graph->addPrior(1, Pose2(0., 0., 0.), noiseModel::Isotropic::Sigma(3, 1e-10));
//  graph->addConstraint(1, 2, Pose2(1., 0., 0.), noiseModel::Isotropic::Sigma(3, 1));
//
//  // Create an initial config
//  boost::shared_ptr<Values> config(new Values);
//  config->insert(1, Pose2(0., 0., 0.));
//  config->insert(2, Pose2(1.5, 0., 0.));
//
//  // Create solver and optimizer
//  Optimizer::shared_solver solver
//    (new SubgraphSolver<Graph, Values> (*graph, *config));
//  Optimizer optimizer(graph, config, solver);
//
//  // Optimize !!!!
//  double relativeThreshold = 1e-5;
//  double absoluteThreshold = 1e-5;
//  Optimizer optimized = optimizer.gaussNewton(relativeThreshold,
//    absoluteThreshold, Optimizer::SILENT);
//
//  // Check solution
//  Values expected;
//  expected.insert(1, Pose2(0., 0., 0.));
//  expected.insert(2, Pose2(1., 0., 0.));
//  CHECK(assert_equal(expected, *optimized.values(), 1e-5));
//}

/* ************************************************************************* */
// SL-FIX TEST( NonlinearOptimizer, MultiFrontalSolver )
//{
//  shared_ptr<example::Graph> fg(new example::Graph(
//    example::createNonlinearFactorGraph()));
//  Optimizer::shared_values initial = example::sharedNoisyValues();
//
//  Values expected;
//  expected.insert(simulated2D::PoseKey(1), Point2(0.0, 0.0));
//  expected.insert(simulated2D::PoseKey(2), Point2(1.5, 0.0));
//  expected.insert(simulated2D::PointKey(1), Point2(0.0, -1.0));
//
//  Optimizer::shared_solver solver;
//
//  // Check one ordering
//  shared_ptr<Ordering> ord1(new Ordering());
//  *ord1 += kx(2),kl(1),kx(1);
//  solver = Optimizer::shared_solver(new Optimizer::solver(ord1));
//  Optimizer optimizer1(fg, initial, solver);
//
//  Values actual = optimizer1.levenbergMarquardt();
//  CHECK(assert_equal(actual,expected));
//}

TEST(NonlinearOptimizer, MoreOptimization) {

  NonlinearFactorGraph fg;
  fg.add(PriorFactor<Pose2>(0, Pose2(0,0,0), sharedSigma(3,1)));
  fg.add(BetweenFactor<Pose2>(0, 1, Pose2(1,0,M_PI/2), sharedSigma(3,1)));
  fg.add(BetweenFactor<Pose2>(1, 2, Pose2(1,0,M_PI/2), sharedSigma(3,1)));

  Values init;
  init.insert(0, Pose2(3,4,-M_PI));
  init.insert(1, Pose2(10,2,-M_PI));
  init.insert(2, Pose2(11,7,-M_PI));

  Values expected;
  expected.insert(0, Pose2(0,0,0));
  expected.insert(1, Pose2(1,0,M_PI/2));
  expected.insert(2, Pose2(1,1,M_PI));

  // Try LM and Dogleg
  EXPECT(assert_equal(expected, LevenbergMarquardtOptimizer(fg, init).optimize()));
  EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));
}
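
Both calls above rely on default parameters; a minimal sketch of tuning Dogleg instead, assuming a DoglegParams struct with an initial trust-region radius is accepted the same way LevenbergMarquardtOptimizer accepts its params earlier in this file:

    DoglegParams dlParams;
    dlParams.deltaInitial = 1.0;  // assumed field: initial trust-region radius
    Values result = DoglegOptimizer(fg, init, dlParams).optimize();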

/* ************************************************************************* */
int main() {

@ -23,7 +23,7 @@
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/NonlinearOptimization.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>

@ -47,8 +47,7 @@ TEST(Rot3, optimize) {
    fg.add(Between(Symbol('r',j), Symbol('r',(j+1)%6), Rot3::Rz(M_PI/3.0), sharedSigma(3, 0.01)));
  }

  NonlinearOptimizationParameters params;
  Values final = optimize(fg, initial, params);
  Values final = GaussNewtonOptimizer(fg, initial).optimize();

  EXPECT(assert_equal(truth, final, 1e-5));
}