From 07bda5aa97208a2a88161dc25ad875d36cd1d679 Mon Sep 17 00:00:00 2001 From: Alex Cunningham Date: Sat, 9 Oct 2010 03:09:58 +0000 Subject: [PATCH] Changed linear config names to *Values, updated comments --- examples/PlanarSLAMExample.cpp | 4 +- examples/PlanarSLAMSelfContained.cpp | 26 +- examples/SimpleRotation.cpp | 16 +- gtsam-broken.h | 12 +- gtsam.h | 52 ++-- inference/Factor.h | 2 +- inference/FactorGraph.h | 2 +- inference/ISAM2-inl.h | 84 +++--- inference/ISAM2.h | 28 +- inference/graph-inl.h | 30 +-- inference/graph.h | 6 +- inference/inference-inl.h | 2 +- inference/tests/testBinaryBayesNet.cpp | 2 +- linear/BayesNetPreconditioner.cpp | 26 +- linear/BayesNetPreconditioner.h | 18 +- linear/Errors.cpp | 2 +- linear/Factorization.h | 10 +- linear/GaussianBayesNet.cpp | 30 +-- linear/GaussianBayesNet.h | 18 +- linear/GaussianConditional.cpp | 4 +- linear/GaussianConditional.h | 10 +- linear/GaussianFactor.cpp | 18 +- linear/GaussianFactor.h | 14 +- linear/GaussianFactorGraph.cpp | 64 ++--- linear/GaussianFactorGraph.h | 54 ++-- linear/GaussianISAM.cpp | 6 +- linear/GaussianISAM.h | 6 +- linear/GaussianJunctionTree.cpp | 10 +- linear/GaussianJunctionTree.h | 4 +- linear/Makefile.am | 4 +- linear/SubgraphPreconditioner.cpp | 38 +-- linear/SubgraphPreconditioner.h | 28 +- linear/SubgraphSolver-inl.h | 28 +- linear/SubgraphSolver.h | 26 +- linear/VectorBTree.cpp | 8 +- linear/VectorBTree.h | 6 +- linear/VectorMap.cpp | 16 +- linear/VectorMap.h | 6 +- linear/{VectorConfig.h => VectorValues.h} | 52 ++-- linear/iterative.cpp | 12 +- linear/iterative.h | 10 +- linear/tests/testBayesNetPreconditioner.cpp | 12 +- linear/tests/testGaussianConditional.cpp | 2 +- linear/tests/testGaussianFactor.cpp | 10 +- linear/tests/testGaussianJunctionTree.cpp | 4 +- linear/tests/testVectorBTree.cpp | 4 +- linear/tests/testVectorConfig.cpp | 26 +- linear/tests/testVectorMap.cpp | 4 +- linear/tests/timeFactorOverhead.cpp | 4 +- linear/tests/timeSLAMlike.cpp | 2 +- ...eVectorConfig.cpp 
=> timeVectorValues.cpp} | 4 +- nonlinear/Key.h | 2 +- nonlinear/LieValues-inl.h | 34 +-- nonlinear/LieValues.h | 30 +-- nonlinear/NonlinearConstraint.h | 108 ++++---- nonlinear/NonlinearEquality.h | 12 +- nonlinear/NonlinearFactor.h | 66 ++--- nonlinear/NonlinearFactorGraph-inl.h | 32 +-- nonlinear/NonlinearFactorGraph.h | 28 +- nonlinear/NonlinearOptimizer-inl.h | 26 +- nonlinear/NonlinearOptimizer.h | 38 +-- nonlinear/TupleValues-inl.h | 240 +++++++++--------- nonlinear/TupleValues.h | 192 +++++++------- nonlinear/tests/testConstraintOptimizer.cpp | 8 +- nonlinear/tests/testLieValues.cpp | 54 ++-- slam/BearingFactor.h | 6 +- slam/BearingRangeFactor.h | 6 +- slam/BetweenConstraint.h | 8 +- slam/BetweenFactor.h | 14 +- slam/GaussianISAM2.cpp | 16 +- slam/GaussianISAM2.h | 8 +- slam/LinearApproxFactor-inl.h | 20 +- slam/LinearApproxFactor.h | 20 +- slam/Makefile.am | 10 +- slam/Pose2SLAMOptimizer.cpp | 4 +- slam/Pose2SLAMOptimizer.h | 8 +- slam/PriorFactor.h | 16 +- slam/RangeFactor.h | 6 +- slam/Simulated2DMeasurement.h | 2 +- slam/Simulated2DOdometry.h | 2 +- slam/Simulated2DOrientedOdometry.h | 2 +- slam/Simulated2DOrientedPosePrior.h | 4 +- ...edConfig.h => Simulated2DOrientedValues.h} | 6 +- slam/Simulated2DPointPrior.h | 4 +- slam/Simulated2DPosePrior.h | 4 +- ...imulated2DConfig.h => Simulated2DValues.h} | 6 +- slam/Simulated3D.cpp | 2 +- slam/Simulated3D.h | 16 +- slam/TransformConstraint.h | 10 +- slam/dataset.cpp | 16 +- slam/dataset.h | 6 +- slam/planarSLAM.cpp | 10 +- slam/planarSLAM.h | 26 +- slam/pose2SLAM.cpp | 8 +- slam/pose2SLAM.h | 20 +- slam/pose3SLAM.cpp | 8 +- slam/pose3SLAM.h | 18 +- slam/saveGraph.cpp | 2 +- slam/saveGraph.h | 4 +- slam/simulated2D.cpp | 6 +- slam/simulated2D.h | 18 +- slam/simulated2DConstraints.h | 18 +- slam/simulated2DOriented.cpp | 4 +- slam/simulated2DOriented.h | 12 +- slam/smallExample.cpp | 76 +++--- slam/smallExample.h | 38 +-- slam/tests/testGaussianISAM2.cpp | 32 +-- slam/tests/testPlanarSLAM.cpp | 8 +- 
slam/tests/testPose2Factor.cpp | 12 +- slam/tests/testPose2Prior.cpp | 12 +- slam/tests/testPose2SLAM.cpp | 22 +- ...estPose2Config.cpp => testPose2Values.cpp} | 18 +- slam/tests/testPose3Factor.cpp | 2 +- slam/tests/testPose3SLAM.cpp | 8 +- ...estPose3Config.cpp => testPose3Values.cpp} | 16 +- slam/tests/testSimulated2D.cpp | 6 +- slam/tests/testSimulated2DOriented.cpp | 8 +- slam/tests/testSimulated3D.cpp | 4 +- slam/tests/testVSLAMFactor.cpp | 10 +- slam/tests/testVSLAMGraph.cpp | 14 +- ...estVSLAMConfig.cpp => testVSLAMValues.cpp} | 20 +- slam/visualSLAM.cpp | 6 +- slam/visualSLAM.h | 26 +- tests/testBayesNetPreconditioner.cpp | 22 +- tests/testBoundingConstraint.cpp | 46 ++-- tests/testGaussianBayesNet.cpp | 28 +- tests/testGaussianFactor.cpp | 2 +- tests/testGaussianFactorGraph.cpp | 48 ++-- tests/testGaussianISAM.cpp | 8 +- tests/testGaussianJunctionTree.cpp | 20 +- tests/testGraph.cpp | 8 +- tests/testIterative.cpp | 48 ++-- tests/testLinearApproxFactor.cpp | 6 +- tests/testNonlinearEquality.cpp | 32 +-- tests/testNonlinearEqualityConstraint.cpp | 84 +++--- tests/testNonlinearFactor.cpp | 24 +- tests/testNonlinearFactorGraph.cpp | 10 +- tests/testNonlinearOptimizer.cpp | 66 ++--- tests/testSerialization.cpp | 10 +- tests/testSubgraphPreconditioner.cpp | 46 ++-- tests/testTransformConstraint.cpp | 34 +-- tests/testTupleValues.cpp | 144 +++++------ tests/timeGaussianFactorGraph.cpp | 12 +- tests/timeLinearOnDataset.cpp | 4 +- 144 files changed, 1574 insertions(+), 1578 deletions(-) rename linear/{VectorConfig.h => VectorValues.h} (81%) rename linear/tests/{timeVectorConfig.cpp => timeVectorValues.cpp} (94%) rename slam/{Simulated2DOrientedConfig.h => Simulated2DOrientedValues.h} (87%) rename slam/{Simulated2DConfig.h => Simulated2DValues.h} (88%) rename slam/tests/{testPose2Config.cpp => testPose2Values.cpp} (82%) rename slam/tests/{testPose3Config.cpp => testPose3Values.cpp} (87%) rename slam/tests/{testVSLAMConfig.cpp => testVSLAMValues.cpp} (75%) diff 
--git a/examples/PlanarSLAMExample.cpp b/examples/PlanarSLAMExample.cpp index e9ce801b0..81f96361d 100644 --- a/examples/PlanarSLAMExample.cpp +++ b/examples/PlanarSLAMExample.cpp @@ -65,7 +65,7 @@ int main(int argc, char** argv) { graph.print("Full Graph"); // initialize to noisy points - Config initialEstimate; + Values initialEstimate; initialEstimate.insert(x1, Pose2(0.5, 0.0, 0.2)); initialEstimate.insert(x2, Pose2(2.3, 0.1,-0.2)); initialEstimate.insert(x3, Pose2(4.1, 0.1, 0.1)); @@ -75,7 +75,7 @@ int main(int argc, char** argv) { initialEstimate.print("Initial Estimate"); // optimize using Levenburg-Marquadt optimization with an ordering from colamd - Optimizer::shared_config result = Optimizer::optimizeLM(graph, initialEstimate); + Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate); result->print("Final Result"); diff --git a/examples/PlanarSLAMSelfContained.cpp b/examples/PlanarSLAMSelfContained.cpp index 8e827128e..c6a1d2422 100644 --- a/examples/PlanarSLAMSelfContained.cpp +++ b/examples/PlanarSLAMSelfContained.cpp @@ -31,11 +31,11 @@ // Main typedefs typedef gtsam::TypedSymbol PoseKey; // Key for poses, with type included typedef gtsam::TypedSymbol PointKey; // Key for points, with type included -typedef gtsam::LieValues PoseConfig; // config type for poses -typedef gtsam::LieValues PointConfig; // config type for points -typedef gtsam::TupleValues2 Config; // main config with two variable classes -typedef gtsam::NonlinearFactorGraph Graph; // graph structure -typedef gtsam::NonlinearOptimizer Optimizer; // optimization engine for this domain +typedef gtsam::LieValues PoseValues; // config type for poses +typedef gtsam::LieValues PointValues; // config type for points +typedef gtsam::TupleValues2 Values; // main config with two variable classes +typedef gtsam::NonlinearFactorGraph Graph; // graph structure +typedef gtsam::NonlinearOptimizer Optimizer; // optimization engine for this domain using namespace std; using 
namespace gtsam; @@ -61,7 +61,7 @@ int main(int argc, char** argv) { // gaussian for prior SharedDiagonal prior_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1)); Pose2 prior_measurement(0.0, 0.0, 0.0); // prior at origin - PriorFactor posePrior(x1, prior_measurement, prior_model); // create the factor + PriorFactor posePrior(x1, prior_measurement, prior_model); // create the factor graph.add(posePrior); // add the factor to the graph /* add odometry */ @@ -69,8 +69,8 @@ int main(int argc, char** argv) { SharedDiagonal odom_model = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1)); Pose2 odom_measurement(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case) // create between factors to represent odometry - BetweenFactor odom12(x1, x2, odom_measurement, odom_model); - BetweenFactor odom23(x2, x3, odom_measurement, odom_model); + BetweenFactor odom12(x1, x2, odom_measurement, odom_model); + BetweenFactor odom23(x2, x3, odom_measurement, odom_model); graph.add(odom12); // add both to graph graph.add(odom23); @@ -87,9 +87,9 @@ int main(int argc, char** argv) { range32 = 2.0; // create bearing/range factors - BearingRangeFactor meas11(x1, l1, bearing11, range11, meas_model); - BearingRangeFactor meas21(x2, l1, bearing21, range21, meas_model); - BearingRangeFactor meas32(x3, l2, bearing32, range32, meas_model); + BearingRangeFactor meas11(x1, l1, bearing11, range11, meas_model); + BearingRangeFactor meas21(x2, l1, bearing21, range21, meas_model); + BearingRangeFactor meas32(x3, l2, bearing32, range32, meas_model); // add the factors graph.add(meas11); @@ -99,7 +99,7 @@ int main(int argc, char** argv) { graph.print("Full Graph"); // initialize to noisy points - Config initialEstimate; + Values initialEstimate; initialEstimate.insert(x1, Pose2(0.5, 0.0, 0.2)); initialEstimate.insert(x2, Pose2(2.3, 0.1,-0.2)); initialEstimate.insert(x3, Pose2(4.1, 0.1, 0.1)); @@ -109,7 +109,7 @@ int main(int argc, char** argv) { 
initialEstimate.print("Initial Estimate"); // optimize using Levenburg-Marquadt optimization with an ordering from colamd - Optimizer::shared_config result = Optimizer::optimizeLM(graph, initialEstimate); + Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate); result->print("Final Result"); diff --git a/examples/SimpleRotation.cpp b/examples/SimpleRotation.cpp index 7595bfc39..df497be33 100644 --- a/examples/SimpleRotation.cpp +++ b/examples/SimpleRotation.cpp @@ -21,7 +21,7 @@ #include /* - * TODO: make factors independent of Config + * TODO: make factors independent of Values * TODO: get rid of excessive shared pointer stuff: mostly gone * TODO: make toplevel documentation * TODO: investigate whether we can just use ints as keys: will occur for linear, not nonlinear @@ -31,10 +31,10 @@ using namespace std; using namespace gtsam; typedef TypedSymbol Key; -typedef LieValues Config; -typedef NonlinearFactorGraph Graph; -typedef Factorization Solver; -typedef NonlinearOptimizer Optimizer; +typedef LieValues Values; +typedef NonlinearFactorGraph Graph; +typedef Factorization Solver; +typedef NonlinearOptimizer Optimizer; const double degree = M_PI / 180; @@ -47,19 +47,19 @@ int main() { prior1.print("Goal Angle"); SharedDiagonal model1 = noiseModel::Isotropic::Sigma(1, 1 * degree); Key key1(1); - PriorFactor factor1(key1, prior1, model1); + PriorFactor factor1(key1, prior1, model1); // Create a factor graph Graph graph; graph.add(factor1); // and an initial estimate - Config initialEstimate; + Values initialEstimate; initialEstimate.insert(key1, Rot2::fromAngle(20 * degree)); initialEstimate.print("Initialization"); // create an ordering - Optimizer::shared_config result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::LAMBDA); + Optimizer::shared_values result = Optimizer::optimizeLM(graph, initialEstimate, Optimizer::LAMBDA); result->print("Final config"); return 0; diff --git a/gtsam-broken.h b/gtsam-broken.h index 
e83eaba79..bbb6245a3 100644 --- a/gtsam-broken.h +++ b/gtsam-broken.h @@ -1,9 +1,9 @@ // These are currently broken -// Solve by parsing a namespace pose2SLAM::Config and making a Pose2SLAMConfig class +// Solve by parsing a namespace pose2SLAM::Values and making a Pose2SLAMValues class // We also have to solve the shared pointer mess to avoid duplicate methods -class Pose2Config{ - Pose2Config(); +class Pose2Values{ + Pose2Values(); Pose2 get(string key) const; void insert(string name, const Pose2& val); void print(string s) const; @@ -15,15 +15,15 @@ class Pose2Factor { Pose2Factor(string key1, string key2, const Pose2& measured, Matrix measurement_covariance); void print(string name) const; - double error(const Pose2Config& c) const; + double error(const Pose2Values& c) const; size_t size() const; - GaussianFactor* linearize(const Pose2Config& config) const; + GaussianFactor* linearize(const Pose2Values& config) const; }; class Pose2Graph{ Pose2Graph(); void print(string s) const; - GaussianFactorGraph* linearize_(const Pose2Config& config) const; + GaussianFactorGraph* linearize_(const Pose2Values& config) const; void push_back(Pose2Factor* factor); }; diff --git a/gtsam.h b/gtsam.h index e6fb5f997..3bf84cafb 100644 --- a/gtsam.h +++ b/gtsam.h @@ -23,10 +23,10 @@ class SymbolicFactor{ void print(string s) const; }; -class VectorConfig { - VectorConfig(); +class VectorValues { + VectorValues(); void print(string s) const; - bool equals(const VectorConfig& expected, double tol) const; + bool equals(const VectorValues& expected, double tol) const; void insert(string name, Vector val); Vector get(string name) const; bool contains(string name) const; @@ -57,7 +57,7 @@ class GaussianFactor { bool empty() const; Vector get_b() const; Matrix get_A(string key) const; - double error(const VectorConfig& c) const; + double error(const VectorValues& c) const; bool involves(string key) const; pair matrix(const Ordering& ordering) const; pair eliminate(string key) const; @@ 
-91,7 +91,7 @@ class GaussianConditional { void print(string s) const; bool equals(const GaussianConditional &cg, double tol) const; void add(string key, Matrix S); - Vector solve(const VectorConfig& x); + Vector solve(const VectorValues& x); }; class GaussianBayesNet { @@ -109,17 +109,17 @@ class GaussianFactorGraph { size_t size() const; void push_back(GaussianFactor* ptr_f); - double error(const VectorConfig& c) const; - double probPrime(const VectorConfig& c) const; + double error(const VectorValues& c) const; + double probPrime(const VectorValues& c) const; void combine(const GaussianFactorGraph& lfg); GaussianConditional* eliminateOne(string key); GaussianBayesNet* eliminate_(const Ordering& ordering); - VectorConfig* optimize_(const Ordering& ordering); + VectorValues* optimize_(const Ordering& ordering); pair matrix(const Ordering& ordering) const; Matrix sparse(const Ordering& ordering) const; - VectorConfig* steepestDescent_(const VectorConfig& x0) const; - VectorConfig* conjugateGradientDescent_(const VectorConfig& x0) const; + VectorValues* steepestDescent_(const VectorValues& x0) const; + VectorValues* conjugateGradientDescent_(const VectorValues& x0) const; }; class Point2 { @@ -159,8 +159,8 @@ class Pose2 { Rot2 r() const; }; -class Simulated2DConfig { - Simulated2DConfig(); +class Simulated2DValues { + Simulated2DValues(); void print(string s) const; void insertPose(int i, const Point2& p); void insertPoint(int j, const Point2& p); @@ -170,8 +170,8 @@ class Simulated2DConfig { Point2* point(int j); }; -class Simulated2DOrientedConfig { - Simulated2DOrientedConfig(); +class Simulated2DOrientedValues { + Simulated2DOrientedValues(); void print(string s) const; void insertPose(int i, const Pose2& p); void insertPoint(int j, const Point2& p); @@ -184,43 +184,43 @@ class Simulated2DOrientedConfig { class Simulated2DPosePrior { Simulated2DPosePrior(Point2& mu, const SharedDiagonal& model, int i); void print(string s) const; - GaussianFactor* 
linearize(const Simulated2DConfig& config) const; - double error(const Simulated2DConfig& c) const; + GaussianFactor* linearize(const Simulated2DValues& config) const; + double error(const Simulated2DValues& c) const; }; class Simulated2DOrientedPosePrior { Simulated2DOrientedPosePrior(Pose2& mu, const SharedDiagonal& model, int i); void print(string s) const; - GaussianFactor* linearize(const Simulated2DOrientedConfig& config) const; - double error(const Simulated2DOrientedConfig& c) const; + GaussianFactor* linearize(const Simulated2DOrientedValues& config) const; + double error(const Simulated2DOrientedValues& c) const; }; class Simulated2DPointPrior { Simulated2DPointPrior(Point2& mu, const SharedDiagonal& model, int i); void print(string s) const; - GaussianFactor* linearize(const Simulated2DConfig& config) const; - double error(const Simulated2DConfig& c) const; + GaussianFactor* linearize(const Simulated2DValues& config) const; + double error(const Simulated2DValues& c) const; }; class Simulated2DOdometry { Simulated2DOdometry(Point2& mu, const SharedDiagonal& model, int i1, int i2); void print(string s) const; - GaussianFactor* linearize(const Simulated2DConfig& config) const; - double error(const Simulated2DConfig& c) const; + GaussianFactor* linearize(const Simulated2DValues& config) const; + double error(const Simulated2DValues& c) const; }; class Simulated2DOrientedOdometry { Simulated2DOrientedOdometry(Pose2& mu, const SharedDiagonal& model, int i1, int i2); void print(string s) const; - GaussianFactor* linearize(const Simulated2DOrientedConfig& config) const; - double error(const Simulated2DOrientedConfig& c) const; + GaussianFactor* linearize(const Simulated2DOrientedValues& config) const; + double error(const Simulated2DOrientedValues& c) const; }; class Simulated2DMeasurement { Simulated2DMeasurement(Point2& mu, const SharedDiagonal& model, int i, int j); void print(string s) const; - GaussianFactor* linearize(const Simulated2DConfig& config) 
const; - double error(const Simulated2DConfig& c) const; + GaussianFactor* linearize(const Simulated2DValues& config) const; + double error(const Simulated2DValues& c) const; }; class Pose2SLAMOptimizer { diff --git a/inference/Factor.h b/inference/Factor.h index bb067d5bd..8979ea048 100644 --- a/inference/Factor.h +++ b/inference/Factor.h @@ -31,7 +31,7 @@ class Conditional; * conflicts with efficiency as well, esp. in the case of incomplete * QR factorization. A solution is still being sought. * - * A Factor is templated on a Config, for example VectorConfig is a configuration of + * A Factor is templated on a Values, for example VectorValues is a values structure of * labeled vectors. This way, we can have factors that might be defined on discrete * variables, continuous ones, or a combination of both. It is up to the config to * provide the appropriate values at the appropriate time. diff --git a/inference/FactorGraph.h b/inference/FactorGraph.h index 2041a9773..738ffc3bc 100644 --- a/inference/FactorGraph.h +++ b/inference/FactorGraph.h @@ -29,7 +29,7 @@ namespace gtsam { * A factor graph is a bipartite graph with factor nodes connected to variable nodes. * In this class, however, only factor nodes are kept around. * - * Templated on the type of factors and configuration. + * Templated on the type of factors and values structure. 
*/ template class FactorGraph: public Testable > { diff --git a/inference/ISAM2-inl.h b/inference/ISAM2-inl.h index 94fefe8c1..e474bf143 100644 --- a/inference/ISAM2-inl.h +++ b/inference/ISAM2-inl.h @@ -15,7 +15,7 @@ using namespace boost::assign; #include #include #include -#include +#include #include #include @@ -33,12 +33,12 @@ using namespace std; static const bool disableReordering = false; /** Create an empty Bayes Tree */ -template -ISAM2::ISAM2() : BayesTree(), delta_(Permutation(), deltaUnpermuted_) {} +template +ISAM2::ISAM2() : BayesTree(), delta_(Permutation(), deltaUnpermuted_) {} /** Create a Bayes Tree from a nonlinear factor graph */ -//template -//ISAM2::ISAM2(const NonlinearFactorGraph& nlfg, const Ordering& ordering, const Config& config) : +//template +//ISAM2::ISAM2(const NonlinearFactorGraph& nlfg, const Ordering& ordering, const Values& config) : //BayesTree(nlfg.linearize(config)->eliminate(ordering)), theta_(config), //variableIndex_(nlfg.symbolic(config, ordering), config.dims(ordering)), deltaUnpermuted_(variableIndex_.dims()), //delta_(Permutation::Identity(variableIndex_.size())), nonlinearFactors_(nlfg), ordering_(ordering) { @@ -48,14 +48,14 @@ ISAM2::ISAM2() : BayesTree(), delta_(Permutati //} /* ************************************************************************* */ -template -list ISAM2::getAffectedFactors(const list& keys) const { +template +list ISAM2::getAffectedFactors(const list& keys) const { static const bool debug = false; if(debug) cout << "Getting affected factors for "; if(debug) { BOOST_FOREACH(const varid_t key, keys) { cout << key << " "; } } if(debug) cout << endl; - FactorGraph > allAffected; + FactorGraph > allAffected; list indices; BOOST_FOREACH(const varid_t key, keys) { // const list l = nonlinearFactors_.factors(key); @@ -77,15 +77,15 @@ list ISAM2::getAffectedFactors(const list& /* ************************************************************************* */ // retrieve all factors that ONLY contain the 
affected variables // (note that the remaining stuff is summarized in the cached factors) -template -boost::shared_ptr ISAM2::relinearizeAffectedFactors +template +boost::shared_ptr ISAM2::relinearizeAffectedFactors (const list& affectedKeys) const { tic("8.2.2.1 getAffectedFactors"); list candidates = getAffectedFactors(affectedKeys); toc("8.2.2.1 getAffectedFactors"); - NonlinearFactorGraph nonlinearAffectedFactors; + NonlinearFactorGraph nonlinearAffectedFactors; tic("8.2.2.2 affectedKeysSet"); // for fast lookup below @@ -113,8 +113,8 @@ boost::shared_ptr ISAM2::relinearizeAf /* ************************************************************************* */ // find intermediate (linearized) factors from cache that are passed into the affected area -template -GaussianFactorGraph ISAM2::getCachedBoundaryFactors(Cliques& orphans) { +template +GaussianFactorGraph ISAM2::getCachedBoundaryFactors(Cliques& orphans) { static const bool debug = false; @@ -139,8 +139,8 @@ GaussianFactorGraph ISAM2::getCachedBoundaryFactors(Cliques } /* ************************************************************************* */ -template -void reinsertCache(const typename ISAM2::sharedClique& root, vector& cache, const Permutation& selector, const Permutation& selectorInverse) { +template +void reinsertCache(const typename ISAM2::sharedClique& root, vector& cache, const Permutation& selector, const Permutation& selectorInverse) { static const bool debug = false; if(root) { if(root->size() > 0) { @@ -162,15 +162,15 @@ void reinsertCache(const typename ISAM2::sharedClique& root, assert(!root->cachedFactor()); root->cachedFactor() = cachedFactor; } - typedef ISAM2 This; + typedef ISAM2 This; BOOST_FOREACH(typename This::sharedClique& child, root->children()) { - reinsertCache(child, cache, selector, selectorInverse); + reinsertCache(child, cache, selector, selectorInverse); } } } -template -boost::shared_ptr > ISAM2::recalculate(const set& markedKeys, const vector& newKeys, const 
GaussianFactorGraph* newFactors) { +template +boost::shared_ptr > ISAM2::recalculate(const set& markedKeys, const vector& newKeys, const GaussianFactorGraph* newFactors) { static const bool debug = false; static const bool useMultiFrontal = true; @@ -522,7 +522,7 @@ boost::shared_ptr > ISAM2::recalculate(const s if(bayesNet->size() == 0) assert(newlyCached.size() == 0); else - reinsertCache(this->root(), newlyCached, affectedKeysSelector, affectedKeysSelectorInverse); + reinsertCache(this->root(), newlyCached, affectedKeysSelector, affectedKeysSelectorInverse); toc("8.7.3 re-assemble: insert cache"); lastNnzTop = 0; //calculate_nnz(this->root()); @@ -563,16 +563,16 @@ boost::shared_ptr > ISAM2::recalculate(const s } ///* ************************************************************************* */ -//template -//void ISAM2::linear_update(const GaussianFactorGraph& newFactors) { +//template +//void ISAM2::linear_update(const GaussianFactorGraph& newFactors) { // const list markedKeys = newFactors.keys(); // recalculate(markedKeys, &newFactors); //} /* ************************************************************************* */ // find all variables that are directly connected by a measurement to one of the marked variables -template -void ISAM2::find_all(sharedClique clique, set& keys, const vector& markedMask) { +template +void ISAM2::find_all(sharedClique clique, set& keys, const vector& markedMask) { // does the separator contain any of the variables? 
bool found = false; BOOST_FOREACH(const varid_t& key, clique->separator_) { @@ -591,10 +591,10 @@ void ISAM2::find_all(sharedClique clique, set& key /* ************************************************************************* */ struct _SelectiveExpmap { - const Permuted& delta; + const Permuted& delta; const Ordering& ordering; const vector& mask; - _SelectiveExpmap(const Permuted& _delta, const Ordering& _ordering, const vector& _mask) : + _SelectiveExpmap(const Permuted& _delta, const Ordering& _ordering, const vector& _mask) : delta(_delta), ordering(_ordering), mask(_mask) {} template void operator()(I it_x) { @@ -604,10 +604,10 @@ struct _SelectiveExpmap { }; #ifndef NDEBUG struct _SelectiveExpmapAndClear { - Permuted& delta; + Permuted& delta; const Ordering& ordering; const vector& mask; - _SelectiveExpmapAndClear(Permuted& _delta, const Ordering& _ordering, const vector& _mask) : + _SelectiveExpmapAndClear(Permuted& _delta, const Ordering& _ordering, const vector& _mask) : delta(_delta), ordering(_ordering), mask(_mask) {} template void operator()(I it_x) { @@ -627,8 +627,8 @@ struct _SelectiveExpmapAndClear { #endif struct _VariableAdder { Ordering& ordering; - Permuted& vconfig; - _VariableAdder(Ordering& _ordering, Permuted& _vconfig) : ordering(_ordering), vconfig(_vconfig) {} + Permuted& vconfig; + _VariableAdder(Ordering& _ordering, Permuted& _vconfig) : ordering(_ordering), vconfig(_vconfig) {} template void operator()(I xIt) { static const bool debug = false; @@ -638,9 +638,9 @@ struct _VariableAdder { if(debug) cout << "Adding variable " << (string)xIt->first << " with order " << var << endl; } }; -template -void ISAM2::update( - const NonlinearFactorGraph& newFactors, const Config& newTheta, +template +void ISAM2::update( + const NonlinearFactorGraph& newFactors, const Values& newTheta, double wildfire_threshold, double relinearize_threshold, bool relinearize) { static const bool debug = false; @@ -690,7 +690,7 @@ void ISAM2::update( // 3. 
Mark linear update set markedKeys; vector newKeys; newKeys.reserve(newFactors.size() * 6); - BOOST_FOREACH(const typename NonlinearFactor::shared_ptr& factor, newFactors) { + BOOST_FOREACH(const typename NonlinearFactor::shared_ptr& factor, newFactors) { BOOST_FOREACH(const Symbol& key, factor->keys()) { markedKeys.insert(ordering_[key]); newKeys.push_back(ordering_[key]); @@ -798,7 +798,7 @@ void ISAM2::update( tic("9 step9"); // 9. Solve if (wildfire_threshold<=0.) { - VectorConfig newDelta(variableIndex_.dims()); + VectorValues newDelta(variableIndex_.dims()); optimize2(this->root(), newDelta); assert(newDelta.size() == delta_.size()); delta_.permutation() = Permutation::Identity(delta_.size()); @@ -807,7 +807,7 @@ void ISAM2::update( // GaussianFactorGraph linearfull = *nonlinearFactors_.linearize(theta_, ordering_); // GaussianBayesNet gbn = *Inference::Eliminate(linearfull); -// VectorConfig deltafull = optimize(gbn); +// VectorValues deltafull = optimize(gbn); // assert(assert_equal(deltafull, newDelta, 1e-3)); } else { @@ -822,9 +822,9 @@ void ISAM2::update( } /* ************************************************************************* */ -template -Config ISAM2::calculateEstimate() const { - Config ret(theta_); +template +Values ISAM2::calculateEstimate() const { + Values ret(theta_); vector mask(ordering_.nVars(), true); _SelectiveExpmap selectiveExpmap(delta_, ordering_, mask); ret.apply(selectiveExpmap); @@ -832,9 +832,9 @@ Config ISAM2::calculateEstimate() const { } /* ************************************************************************* */ -template -Config ISAM2::calculateBestEstimate() const { - VectorConfig delta(variableIndex_.dims()); +template +Values ISAM2::calculateBestEstimate() const { + VectorValues delta(variableIndex_.dims()); optimize2(this->root(), delta); return theta_.expmap(delta, ordering_); } diff --git a/inference/ISAM2.h b/inference/ISAM2.h index 929ea6911..ffd7e5b92 100644 --- a/inference/ISAM2.h +++ b/inference/ISAM2.h @@ 
-28,29 +28,29 @@ namespace gtsam { //typedef std::vector CachedFactors; -template +template class ISAM2: public BayesTree { protected: // current linearization point - Config theta_; + Values theta_; // VariableIndex lets us look up factors by involved variable and keeps track of dimensions typedef GaussianVariableIndex VariableIndexType; VariableIndexType variableIndex_; // the linear solution, an update to the estimate in theta - VectorConfig deltaUnpermuted_; + VectorValues deltaUnpermuted_; // The residual permutation through which the deltaUnpermuted_ is - // referenced. Permuting the VectorConfig is slow, so for performance the - // permutation is applied at access time instead of to the VectorConfig + // referenced. Permuting the VectorValues is slow, so for performance the + // permutation is applied at access time instead of to the VectorValues // itself. - Permuted delta_; + Permuted delta_; // for keeping all original nonlinear factors - NonlinearFactorGraph nonlinearFactors_; + NonlinearFactorGraph nonlinearFactors_; // The "ordering" allows converting Symbols to varid_t (integer) keys. We // keep it up to date as we add and reorder variables. @@ -69,7 +69,7 @@ public: ISAM2(); // /** Create a Bayes Tree from a Bayes Net */ -// ISAM2(const NonlinearFactorGraph& fg, const Ordering& ordering, const Config& config); +// ISAM2(const NonlinearFactorGraph& fg, const Ordering& ordering, const Values& config); /** Destructor */ virtual ~ISAM2() {} @@ -81,21 +81,21 @@ public: /** * ISAM2. */ - void update(const NonlinearFactorGraph& newFactors, const Config& newTheta, + void update(const NonlinearFactorGraph& newFactors, const Values& newTheta, double wildfire_threshold = 0., double relinearize_threshold = 0., bool relinearize = true); // needed to create initial estimates - const Config& getLinearizationPoint() const {return theta_;} + const Values& getLinearizationPoint() const {return theta_;} // estimate based on incomplete delta (threshold!) 
- Config calculateEstimate() const; + Values calculateEstimate() const; // estimate based on full delta (note that this is based on the current linearization point) - Config calculateBestEstimate() const; + Values calculateBestEstimate() const; - const Permuted& getDelta() const { return delta_; } + const Permuted& getDelta() const { return delta_; } - const NonlinearFactorGraph& getFactorsUnsafe() const { return nonlinearFactors_; } + const NonlinearFactorGraph& getFactorsUnsafe() const { return nonlinearFactors_; } const Ordering& getOrdering() const { return ordering_; } diff --git a/inference/graph-inl.h b/inference/graph-inl.h index 1e00a1503..3d65c0c15 100644 --- a/inference/graph-inl.h +++ b/inference/graph-inl.h @@ -126,19 +126,19 @@ predecessorMap2Graph(const PredecessorMap& p_map) { } /* ************************************************************************* */ -template +template class compose_key_visitor : public boost::default_bfs_visitor { private: - boost::shared_ptr config_; + boost::shared_ptr config_; public: - compose_key_visitor(boost::shared_ptr config_in) {config_ = config_in;} + compose_key_visitor(boost::shared_ptr config_in) {config_ = config_in;} template void tree_edge(Edge edge, const Graph& g) const { - typename Config::Key key_from = boost::get(boost::vertex_name, g, boost::source(edge, g)); - typename Config::Key key_to = boost::get(boost::vertex_name, g, boost::target(edge, g)); + typename Values::Key key_from = boost::get(boost::vertex_name, g, boost::source(edge, g)); + typename Values::Key key_to = boost::get(boost::vertex_name, g, boost::target(edge, g)); Pose relativePose = boost::get(boost::edge_weight, g, edge); config_->insert(key_to, (*config_)[key_from].compose(relativePose)); } @@ -146,23 +146,23 @@ public: }; /* ************************************************************************* */ -template -boost::shared_ptr composePoses(const G& graph, const PredecessorMap& tree, +template +boost::shared_ptr composePoses(const 
G& graph, const PredecessorMap& tree, const Pose& rootPose) { //TODO: change edge_weight_t to edge_pose_t typedef typename boost::adjacency_list< boost::vecS, boost::vecS, boost::directedS, - boost::property, + boost::property, boost::property > PoseGraph; typedef typename boost::graph_traits::vertex_descriptor PoseVertex; typedef typename boost::graph_traits::edge_descriptor PoseEdge; PoseGraph g; PoseVertex root; - map key2vertex; + map key2vertex; boost::tie(g, root, key2vertex) = - predecessorMap2Graph(tree); + predecessorMap2Graph(tree); // attach the relative poses to the edges PoseEdge edge12, edge21; @@ -176,8 +176,8 @@ boost::shared_ptr composePoses(const G& graph, const PredecessorMap factor = boost::dynamic_pointer_cast(nl_factor); if (!factor) continue; - typename Config::Key key1 = factor->key1(); - typename Config::Key key2 = factor->key2(); + typename Values::Key key1 = factor->key1(); + typename Values::Key key2 = factor->key2(); PoseVertex v1 = key2vertex.find(key1)->second; PoseVertex v2 = key2vertex.find(key2)->second; @@ -194,10 +194,10 @@ boost::shared_ptr composePoses(const G& graph, const PredecessorMap config(new Config); - typename Config::Key rootKey = boost::get(boost::vertex_name, g, root); + boost::shared_ptr config(new Values); + typename Values::Key rootKey = boost::get(boost::vertex_name, g, root); config->insert(rootKey, rootPose); - compose_key_visitor vis(config); + compose_key_visitor vis(config); boost::breadth_first_search(g, root, boost::visitor(vis)); return config; diff --git a/inference/graph.h b/inference/graph.h index 666b1ac47..28d963592 100644 --- a/inference/graph.h +++ b/inference/graph.h @@ -76,8 +76,8 @@ namespace gtsam { /** * Compose the poses by following the chain specified by the spanning tree */ - template - boost::shared_ptr - composePoses(const G& graph, const PredecessorMap& tree, const Pose& rootPose); + template + boost::shared_ptr + composePoses(const G& graph, const PredecessorMap& tree, const Pose& 
rootPose); } // namespace gtsam diff --git a/inference/inference-inl.h b/inference/inference-inl.h index 5f6c367f3..2e39b235a 100644 --- a/inference/inference-inl.h +++ b/inference/inference-inl.h @@ -438,7 +438,7 @@ Permutation::shared_ptr Inference::PermutationCOLAMD(const VariableIndexType& va // if(factor->keys().size() != 1 || factor->keys().front() != key) // throw runtime_error("Didn't get the right marginal!"); // -// VectorConfig mean_cfg(marginal.optimize(Ordering(key))); +// VectorValues mean_cfg(marginal.optimize(Ordering(key))); // Matrix A(factor->get_A(key)); // // return make_pair(mean_cfg[key], inverse(prod(trans(A), A))); diff --git a/inference/tests/testBinaryBayesNet.cpp b/inference/tests/testBinaryBayesNet.cpp index ff6d9fc2a..8d626e1f4 100644 --- a/inference/tests/testBinaryBayesNet.cpp +++ b/inference/tests/testBinaryBayesNet.cpp @@ -62,7 +62,7 @@ TEST( BinaryBayesNet, constructor ) // single parent conditional for x vector cpt; - cpt += 0.3, 0.6 ; // array index corresponds to binary parent configuration + cpt += 0.3, 0.6 ; // array index corresponds to binary parent values structure boost::shared_ptr px_y(new BinaryConditional("x","y",cpt)); DOUBLES_EQUAL(0.7,px_y->probability(config),0.01); diff --git a/linear/BayesNetPreconditioner.cpp b/linear/BayesNetPreconditioner.cpp index 62a3e5039..e44cdc757 100644 --- a/linear/BayesNetPreconditioner.cpp +++ b/linear/BayesNetPreconditioner.cpp @@ -17,26 +17,26 @@ namespace gtsam { /* ************************************************************************* */ // R*x = y by solving x=inv(R)*y - VectorConfig BayesNetPreconditioner::backSubstitute(const VectorConfig& y) const { + VectorValues BayesNetPreconditioner::backSubstitute(const VectorValues& y) const { return gtsam::backSubstitute(Rd_, y); } /* ************************************************************************* */ // gy=inv(L)*gx by solving L*gy=gx. 
- VectorConfig BayesNetPreconditioner::backSubstituteTranspose( - const VectorConfig& gx) const { + VectorValues BayesNetPreconditioner::backSubstituteTranspose( + const VectorValues& gx) const { return gtsam::backSubstituteTranspose(Rd_, gx); } /* ************************************************************************* */ - double BayesNetPreconditioner::error(const VectorConfig& y) const { + double BayesNetPreconditioner::error(const VectorValues& y) const { return Ab_.error(x(y)); } /* ************************************************************************* */ // gradient is inv(R')*A'*(A*inv(R)*y-b), - VectorConfig BayesNetPreconditioner::gradient(const VectorConfig& y) const { - VectorConfig gx = VectorConfig::zero(y); + VectorValues BayesNetPreconditioner::gradient(const VectorValues& y) const { + VectorValues gx = VectorValues::zero(y); Errors e = Ab_.errors(x(y)); Ab_.transposeMultiplyAdd(1.0,e,gx); return gtsam::backSubstituteTranspose(Rd_, gx); @@ -44,31 +44,31 @@ namespace gtsam { /* ************************************************************************* */ // Apply operator * - Errors BayesNetPreconditioner::operator*(const VectorConfig& y) const { + Errors BayesNetPreconditioner::operator*(const VectorValues& y) const { return Ab_ * x(y); } /* ************************************************************************* */ // In-place version that overwrites e // TODO: version that takes scratch space for x - void BayesNetPreconditioner::multiplyInPlace(const VectorConfig& y, Errors& e) const { - VectorConfig x = y; + void BayesNetPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const { + VectorValues x = y; backSubstituteInPlace(Rd_,x); Ab_.multiplyInPlace(x,e); } /* ************************************************************************* */ // Apply operator inv(R')*A'*e - VectorConfig BayesNetPreconditioner::operator^(const Errors& e) const { - VectorConfig x = Ab_ ^ e; // x = A'*e2 + VectorValues 
BayesNetPreconditioner::operator^(const Errors& e) const { + VectorValues x = Ab_ ^ e; // x = A'*e2 return gtsam::backSubstituteTranspose(Rd_, x); } /* ************************************************************************* */ // y += alpha*inv(R')*A'*e void BayesNetPreconditioner::transposeMultiplyAdd(double alpha, - const Errors& e, VectorConfig& y) const { - VectorConfig x = VectorConfig::zero(y); + const Errors& e, VectorValues& y) const { + VectorValues x = VectorValues::zero(y); Ab_.transposeMultiplyAdd(1.0,e,x); // x += A'*e axpy(alpha, gtsam::backSubstituteTranspose(Rd_, x), y); // y += alpha*inv(R')*x } diff --git a/linear/BayesNetPreconditioner.h b/linear/BayesNetPreconditioner.h index c31f05692..cc2b4b0b5 100644 --- a/linear/BayesNetPreconditioner.h +++ b/linear/BayesNetPreconditioner.h @@ -34,33 +34,33 @@ namespace gtsam { const GaussianBayesNet& Rd); // R*x = y by solving x=inv(R)*y - VectorConfig backSubstitute(const VectorConfig& y) const; + VectorValues backSubstitute(const VectorValues& y) const; // gy=inv(L)*gx by solving L*gy=gx. 
- VectorConfig backSubstituteTranspose(const VectorConfig& gx) const; + VectorValues backSubstituteTranspose(const VectorValues& gx) const; /* x = inv(R)*y */ - inline VectorConfig x(const VectorConfig& y) const { + inline VectorValues x(const VectorValues& y) const { return backSubstitute(y); } /* error, given y */ - double error(const VectorConfig& y) const; + double error(const VectorValues& y) const; /** gradient */ - VectorConfig gradient(const VectorConfig& y) const; + VectorValues gradient(const VectorValues& y) const; /** Apply operator A */ - Errors operator*(const VectorConfig& y) const; + Errors operator*(const VectorValues& y) const; /** In-place version that overwrites e */ - void multiplyInPlace(const VectorConfig& y, Errors& e) const; + void multiplyInPlace(const VectorValues& y, Errors& e) const; /** Apply operator A' */ - VectorConfig operator^(const Errors& e) const; + VectorValues operator^(const Errors& e) const; /** BLAS level 2 equivalent y += alpha*inv(R')*A'*e */ - void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& y) const; + void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& y) const; }; } // namespace gtsam diff --git a/linear/Errors.cpp b/linear/Errors.cpp index c452d1ad5..cc1b6112d 100644 --- a/linear/Errors.cpp +++ b/linear/Errors.cpp @@ -1,6 +1,6 @@ /** * @file Errors.cpp - * @brief Factor Graph Configuration + * @brief Factor Graph Values * @brief Errors * @author Carlos Nieto * @author Christian Potthast diff --git a/linear/Factorization.h b/linear/Factorization.h index e3613308e..20a11f750 100644 --- a/linear/Factorization.h +++ b/linear/Factorization.h @@ -20,7 +20,7 @@ namespace gtsam { /** * A linear system solver using factorization */ - template + template class Factorization { private: boost::shared_ptr ordering_; @@ -35,14 +35,14 @@ namespace gtsam { * solve for the optimal displacement in the tangent space, and then solve * the resulted linear system */ - VectorConfig 
optimize(GaussianFactorGraph& fg) const { + VectorValues optimize(GaussianFactorGraph& fg) const { return gtsam::optimize(*Inference::Eliminate(fg)); } /** * linearize the non-linear graph around the current config */ - boost::shared_ptr linearize(const NonlinearGraph& g, const Config& config) const { + boost::shared_ptr linearize(const NonlinearGraph& g, const Values& config) const { return g.linearize(config, *ordering_); } @@ -53,8 +53,8 @@ namespace gtsam { return boost::shared_ptr(new Factorization(*this)); } - /** expmap the Config given the stored Ordering */ - Config expmap(const Config& config, const VectorConfig& delta) const { + /** expmap the Values given the stored Ordering */ + Values expmap(const Values& config, const VectorValues& delta) const { return config.expmap(delta, *ordering_); } }; diff --git a/linear/GaussianBayesNet.cpp b/linear/GaussianBayesNet.cpp index 8faf820d0..a6016b52b 100644 --- a/linear/GaussianBayesNet.cpp +++ b/linear/GaussianBayesNet.cpp @@ -10,7 +10,7 @@ #include #include -#include +#include using namespace std; using namespace gtsam; @@ -59,25 +59,25 @@ void push_front(GaussianBayesNet& bn, varid_t key, Vector d, Matrix R, } /* ************************************************************************* */ -boost::shared_ptr allocateVectorConfig(const GaussianBayesNet& bn) { +boost::shared_ptr allocateVectorValues(const GaussianBayesNet& bn) { vector dimensions(bn.size()); varid_t var = 0; BOOST_FOREACH(const boost::shared_ptr conditional, bn) { dimensions[var++] = conditional->get_R().size1(); } - return boost::shared_ptr(new VectorConfig(dimensions)); + return boost::shared_ptr(new VectorValues(dimensions)); } /* ************************************************************************* */ -VectorConfig optimize(const GaussianBayesNet& bn) +VectorValues optimize(const GaussianBayesNet& bn) { return *optimize_(bn); } /* ************************************************************************* */ -boost::shared_ptr 
optimize_(const GaussianBayesNet& bn) +boost::shared_ptr optimize_(const GaussianBayesNet& bn) { - boost::shared_ptr result(allocateVectorConfig(bn)); + boost::shared_ptr result(allocateVectorValues(bn)); /** solve each node in turn in topological sort order (parents first)*/ BOOST_REVERSE_FOREACH(GaussianConditional::shared_ptr cg, bn) { @@ -88,16 +88,16 @@ boost::shared_ptr optimize_(const GaussianBayesNet& bn) } /* ************************************************************************* */ -VectorConfig backSubstitute(const GaussianBayesNet& bn, const VectorConfig& y) { - VectorConfig x(y); +VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y) { + VectorValues x(y); backSubstituteInPlace(bn,x); return x; } /* ************************************************************************* */ // (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas) -void backSubstituteInPlace(const GaussianBayesNet& bn, VectorConfig& y) { - VectorConfig& x = y; +void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y) { + VectorValues& x = y; /** solve each node in turn in topological sort order (parents first)*/ BOOST_REVERSE_FOREACH(const boost::shared_ptr cg, bn) { // i^th part of R*x=y, x=inv(R)*y @@ -116,12 +116,12 @@ void backSubstituteInPlace(const GaussianBayesNet& bn, VectorConfig& y) { // gy=inv(L)*gx by solving L*gy=gx. 
// gy=inv(R'*inv(Sigma))*gx // gz'*R'=gx', gy = gz.*sigmas -VectorConfig backSubstituteTranspose(const GaussianBayesNet& bn, - const VectorConfig& gx) { +VectorValues backSubstituteTranspose(const GaussianBayesNet& bn, + const VectorValues& gx) { // Initialize gy from gx // TODO: used to insert zeros if gx did not have an entry for a variable in bn - VectorConfig gy = gx; + VectorValues gy = gx; // we loop from first-eliminated to last-eliminated // i^th part of L*gy=gx is done block-column by block-column of L @@ -195,8 +195,8 @@ pair matrix(const GaussianBayesNet& bn) { } /* ************************************************************************* */ -VectorConfig rhs(const GaussianBayesNet& bn) { - boost::shared_ptr result(allocateVectorConfig(bn)); +VectorValues rhs(const GaussianBayesNet& bn) { + boost::shared_ptr result(allocateVectorValues(bn)); BOOST_FOREACH(boost::shared_ptr cg,bn) { varid_t key = cg->key(); // get sigmas diff --git a/linear/GaussianBayesNet.h b/linear/GaussianBayesNet.h index 6ca9fc3e8..3389292d0 100644 --- a/linear/GaussianBayesNet.h +++ b/linear/GaussianBayesNet.h @@ -43,30 +43,30 @@ namespace gtsam { varid_t name1, Matrix S, varid_t name2, Matrix T, Vector sigmas); /** - * Allocate a VectorConfig for the variables in a BayesNet + * Allocate a VectorValues for the variables in a BayesNet */ - boost::shared_ptr allocateVectorConfig(const GaussianBayesNet& bn); + boost::shared_ptr allocateVectorValues(const GaussianBayesNet& bn); /** * optimize, i.e. 
return x = inv(R)*d */ - VectorConfig optimize(const GaussianBayesNet&); + VectorValues optimize(const GaussianBayesNet&); /** * shared pointer version */ - boost::shared_ptr optimize_(const GaussianBayesNet& bn); + boost::shared_ptr optimize_(const GaussianBayesNet& bn); /* * Backsubstitute * (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas) */ - VectorConfig backSubstitute(const GaussianBayesNet& bn, const VectorConfig& y); + VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y); /* * Backsubstitute in place, y is replaced with solution */ - void backSubstituteInPlace(const GaussianBayesNet& bn, VectorConfig& y); + void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y); /* * Transpose Backsubstitute @@ -74,7 +74,7 @@ namespace gtsam { * gy=inv(R'*inv(Sigma))*gx * gz'*R'=gx', gy = gz.*sigmas */ - VectorConfig backSubstituteTranspose(const GaussianBayesNet& bn, const VectorConfig& gx); + VectorValues backSubstituteTranspose(const GaussianBayesNet& bn, const VectorValues& gx); /** * Return (dense) upper-triangular matrix representation @@ -82,9 +82,9 @@ namespace gtsam { std::pair matrix(const GaussianBayesNet&); /** - * Return RHS d as a VectorConfig + * Return RHS d as a VectorValues * Such that backSubstitute(bn,d) = optimize(bn) */ - VectorConfig rhs(const GaussianBayesNet&); + VectorValues rhs(const GaussianBayesNet&); } /// namespace gtsam diff --git a/linear/GaussianConditional.cpp b/linear/GaussianConditional.cpp index 3c2a08d32..b1e2286b1 100644 --- a/linear/GaussianConditional.cpp +++ b/linear/GaussianConditional.cpp @@ -160,7 +160,7 @@ bool GaussianConditional::equals(const GaussianConditional &c, double tol) const //} /* ************************************************************************* */ -Vector GaussianConditional::solve(const VectorConfig& x) const { +Vector GaussianConditional::solve(const VectorValues& x) const { static const bool debug = false; if(debug) print("Solving conditional "); Vector 
rhs(get_d()); @@ -179,7 +179,7 @@ Vector GaussianConditional::solve(const VectorConfig& x) const { } /* ************************************************************************* */ -Vector GaussianConditional::solve(const Permuted& x) const { +Vector GaussianConditional::solve(const Permuted& x) const { Vector rhs(get_d()); for (const_iterator parent = beginParents(); parent != endParents(); ++parent) { ublas::axpy_prod(-get_S(parent), x[*parent], rhs, false); diff --git a/linear/GaussianConditional.h b/linear/GaussianConditional.h index d97197097..1f1da1076 100644 --- a/linear/GaussianConditional.h +++ b/linear/GaussianConditional.h @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include @@ -118,17 +118,17 @@ public: /** * solve a conditional Gaussian - * @param x configuration in which the parents values (y,z,...) are known + * @param x values structure in which the parents values (y,z,...) are known * @return solution x = R \ (d - Sy - Tz - ...) */ - Vector solve(const VectorConfig& x) const; + Vector solve(const VectorValues& x) const; /** * solve a conditional Gaussian - * @param x configuration in which the parents values (y,z,...) are known + * @param x values structure in which the parents values (y,z,...) are known * @return solution x = R \ (d - Sy - Tz - ...) 
*/ - Vector solve(const Permuted& x) const; + Vector solve(const Permuted& x) const; protected: rsd_type::column_type get_d_() { return rsd_.column(1+nrParents(), 0); } diff --git a/linear/GaussianFactor.cpp b/linear/GaussianFactor.cpp index 26f9e5cd7..dad38b89b 100644 --- a/linear/GaussianFactor.cpp +++ b/linear/GaussianFactor.cpp @@ -249,7 +249,7 @@ void GaussianFactor::permuteWithInverse(const Permutation& inversePermutation) { } /* ************************************************************************* */ -Vector GaussianFactor::unweighted_error(const VectorConfig& c) const { +Vector GaussianFactor::unweighted_error(const VectorValues& c) const { Vector e = -getb(); if (empty()) return e; for(size_t pos=0; poswhiten(-getb()); return model_->whiten(unweighted_error(c)); } /* ************************************************************************* */ -double GaussianFactor::error(const VectorConfig& c) const { +double GaussianFactor::error(const VectorValues& c) const { if (empty()) return 0; Vector weighted = error_vector(c); return 0.5 * inner_prod(weighted,weighted); @@ -287,7 +287,7 @@ double GaussianFactor::error(const VectorConfig& c) const { //} /* ************************************************************************* */ -Vector GaussianFactor::operator*(const VectorConfig& x) const { +Vector GaussianFactor::operator*(const VectorValues& x) const { Vector Ax = zero(Ab_.size1()); if (empty()) return Ax; @@ -301,10 +301,10 @@ Vector GaussianFactor::operator*(const VectorConfig& x) const { } ///* ************************************************************************* */ -//VectorConfig GaussianFactor::operator^(const Vector& e) const { +//VectorValues GaussianFactor::operator^(const Vector& e) const { // Vector E = model_->whiten(e); -// VectorConfig x; -// // Just iterate over all A matrices and insert Ai^e into VectorConfig +// VectorValues x; +// // Just iterate over all A matrices and insert Ai^e into VectorValues // for(size_t pos=0; 
poswhiten(e); - // Just iterate over all A matrices and insert Ai^e into VectorConfig + // Just iterate over all A matrices and insert Ai^e into VectorValues for(size_t pos=0; pos #include #include -#include +#include #include #include #include @@ -105,9 +105,9 @@ public: void print(const std::string& s = "") const; bool equals(const GaussianFactor& lf, double tol = 1e-9) const; - Vector unweighted_error(const VectorConfig& c) const; /** (A*x-b) */ - Vector error_vector(const VectorConfig& c) const; /** (A*x-b)/sigma */ - double error(const VectorConfig& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */ + Vector unweighted_error(const VectorValues& c) const; /** (A*x-b) */ + Vector error_vector(const VectorValues& c) const; /** (A*x-b)/sigma */ + double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */ /** Check if the factor contains no information, i.e. zero rows. This does * not necessarily mean that the factor involves no variables (to check for @@ -170,13 +170,13 @@ public: size_t numberOfRows() const { return Ab_.size1(); } /** Return A*x */ - Vector operator*(const VectorConfig& x) const; + Vector operator*(const VectorValues& x) const; // /** Return A'*e */ -// VectorConfig operator^(const Vector& e) const; +// VectorValues operator^(const Vector& e) const; /** x += A'*e */ - void transposeMultiplyAdd(double alpha, const Vector& e, VectorConfig& x) const; + void transposeMultiplyAdd(double alpha, const Vector& e, VectorValues& x) const; /** * Return (dense) matrix associated with factor diff --git a/linear/GaussianFactorGraph.cpp b/linear/GaussianFactorGraph.cpp index 83c7a8613..d4ba34434 100644 --- a/linear/GaussianFactorGraph.cpp +++ b/linear/GaussianFactorGraph.cpp @@ -51,7 +51,7 @@ void GaussianFactorGraph::permuteWithInverse(const Permutation& inversePermutati } /* ************************************************************************* */ -double GaussianFactorGraph::error(const VectorConfig& x) const { +double GaussianFactorGraph::error(const 
VectorValues& x) const { double total_error = 0.; BOOST_FOREACH(sharedFactor factor,factors_) total_error += factor->error(x); @@ -59,12 +59,12 @@ double GaussianFactorGraph::error(const VectorConfig& x) const { } /* ************************************************************************* */ -Errors GaussianFactorGraph::errors(const VectorConfig& x) const { +Errors GaussianFactorGraph::errors(const VectorValues& x) const { return *errors_(x); } /* ************************************************************************* */ -boost::shared_ptr GaussianFactorGraph::errors_(const VectorConfig& x) const { +boost::shared_ptr GaussianFactorGraph::errors_(const VectorValues& x) const { boost::shared_ptr e(new Errors); BOOST_FOREACH(const sharedFactor& factor,factors_) e->push_back(factor->error_vector(x)); @@ -72,7 +72,7 @@ boost::shared_ptr GaussianFactorGraph::errors_(const VectorConfig& x) co } /* ************************************************************************* */ -Errors GaussianFactorGraph::operator*(const VectorConfig& x) const { +Errors GaussianFactorGraph::operator*(const VectorValues& x) const { Errors e; BOOST_FOREACH(const sharedFactor& Ai,factors_) e.push_back((*Ai)*x); @@ -80,12 +80,12 @@ Errors GaussianFactorGraph::operator*(const VectorConfig& x) const { } /* ************************************************************************* */ -void GaussianFactorGraph::multiplyInPlace(const VectorConfig& x, Errors& e) const { +void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, Errors& e) const { multiplyInPlace(x,e.begin()); } /* ************************************************************************* */ -void GaussianFactorGraph::multiplyInPlace(const VectorConfig& x, +void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const { Errors::iterator ei = e; BOOST_FOREACH(const sharedFactor& Ai,factors_) { @@ -95,12 +95,12 @@ void GaussianFactorGraph::multiplyInPlace(const VectorConfig& x, } ///* 
************************************************************************* */ -//VectorConfig GaussianFactorGraph::operator^(const Errors& e) const { -// VectorConfig x; +//VectorValues GaussianFactorGraph::operator^(const Errors& e) const { +// VectorValues x; // // For each factor add the gradient contribution // Errors::const_iterator it = e.begin(); // BOOST_FOREACH(const sharedFactor& Ai,factors_) { -// VectorConfig xi = (*Ai)^(*(it++)); +// VectorValues xi = (*Ai)^(*(it++)); // x.insertAdd(xi); // } // return x; @@ -109,7 +109,7 @@ void GaussianFactorGraph::multiplyInPlace(const VectorConfig& x, /* ************************************************************************* */ // x += alpha*A'*e void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, - VectorConfig& x) const { + VectorValues& x) const { // For each factor add the gradient contribution Errors::const_iterator ei = e.begin(); BOOST_FOREACH(const sharedFactor& Ai,factors_) @@ -117,9 +117,9 @@ void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, } ///* ************************************************************************* */ -//VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const { +//VectorValues GaussianFactorGraph::gradient(const VectorValues& x) const { // // It is crucial for performance to make a zero-valued clone of x -// VectorConfig g = VectorConfig::zero(x); +// VectorValues g = VectorValues::zero(x); // transposeMultiplyAdd(1.0, errors(x), g); // return g; //} @@ -279,23 +279,23 @@ void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, ///* ************************************************************************* */ -//VectorConfig GaussianFactorGraph::optimize(const Ordering& ordering, bool old) +//VectorValues GaussianFactorGraph::optimize(const Ordering& ordering, bool old) //{ // // eliminate all nodes in the given ordering -> chordal Bayes net // GaussianBayesNet chordalBayesNet = 
eliminate(ordering, old); // -// // calculate new configuration (using backsubstitution) -// VectorConfig delta = ::optimize(chordalBayesNet); +// // calculate new values structure (using backsubstitution) +// VectorValues delta = ::optimize(chordalBayesNet); // return delta; //} // ///* ************************************************************************* */ -//VectorConfig GaussianFactorGraph::optimizeMultiFrontals(const Ordering& ordering) +//VectorValues GaussianFactorGraph::optimizeMultiFrontals(const Ordering& ordering) //{ // GaussianJunctionTree junctionTree(*this, ordering); // -// // calculate new configuration (using backsubstitution) -// VectorConfig delta = junctionTree.optimize(); +// // calculate new values structure (using backsubstitution) +// VectorValues delta = junctionTree.optimize(); // return delta; //} @@ -312,9 +312,9 @@ void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, //} // ///* ************************************************************************* */ -//boost::shared_ptr +//boost::shared_ptr //GaussianFactorGraph::optimize_(const Ordering& ordering) { -// return boost::shared_ptr(new VectorConfig(optimize(ordering))); +// return boost::shared_ptr(new VectorValues(optimize(ordering))); //} /* ************************************************************************* */ @@ -402,9 +402,9 @@ GaussianFactorGraph GaussianFactorGraph::add_priors(double sigma, const Gaussian //} ///* ************************************************************************* */ -//VectorConfig GaussianFactorGraph::assembleConfig(const Vector& vs, const Ordering& ordering) const { +//VectorValues GaussianFactorGraph::assembleValues(const Vector& vs, const Ordering& ordering) const { // Dimensions dims = dimensions(); -// VectorConfig config; +// VectorValues config; // Vector::const_iterator itSrc = vs.begin(); // Vector::iterator itDst; // BOOST_FOREACH(varid_t key, ordering){ @@ -415,7 +415,7 @@ GaussianFactorGraph 
GaussianFactorGraph::add_priors(double sigma, const Gaussian // config.insert(key, v); // } // if (itSrc != vs.end()) -// throw runtime_error("assembleConfig: input vector and ordering are not compatible with the graph"); +// throw runtime_error("assembleValues: input vector and ordering are not compatible with the graph"); // return config; //} // @@ -508,30 +508,30 @@ GaussianFactorGraph GaussianFactorGraph::add_priors(double sigma, const Gaussian //} ///* ************************************************************************* */ -//VectorConfig GaussianFactorGraph::steepestDescent(const VectorConfig& x0, +//VectorValues GaussianFactorGraph::steepestDescent(const VectorValues& x0, // bool verbose, double epsilon, size_t maxIterations) const { // return gtsam::steepestDescent(*this, x0, verbose, epsilon, maxIterations); //} // ///* ************************************************************************* */ -//boost::shared_ptr GaussianFactorGraph::steepestDescent_( -// const VectorConfig& x0, bool verbose, double epsilon, size_t maxIterations) const { -// return boost::shared_ptr(new VectorConfig( +//boost::shared_ptr GaussianFactorGraph::steepestDescent_( +// const VectorValues& x0, bool verbose, double epsilon, size_t maxIterations) const { +// return boost::shared_ptr(new VectorValues( // gtsam::conjugateGradientDescent(*this, x0, verbose, epsilon, // maxIterations))); //} // ///* ************************************************************************* */ -//VectorConfig GaussianFactorGraph::conjugateGradientDescent( -// const VectorConfig& x0, bool verbose, double epsilon, size_t maxIterations) const { +//VectorValues GaussianFactorGraph::conjugateGradientDescent( +// const VectorValues& x0, bool verbose, double epsilon, size_t maxIterations) const { // return gtsam::conjugateGradientDescent(*this, x0, verbose, epsilon, // maxIterations); //} // ///* ************************************************************************* */ -//boost::shared_ptr 
GaussianFactorGraph::conjugateGradientDescent_( -// const VectorConfig& x0, bool verbose, double epsilon, size_t maxIterations) const { -// return boost::shared_ptr(new VectorConfig( +//boost::shared_ptr GaussianFactorGraph::conjugateGradientDescent_( +// const VectorValues& x0, bool verbose, double epsilon, size_t maxIterations) const { +// return boost::shared_ptr(new VectorValues( // gtsam::conjugateGradientDescent(*this, x0, verbose, epsilon, // maxIterations))); //} diff --git a/linear/GaussianFactorGraph.h b/linear/GaussianFactorGraph.h index 5ad8c6e48..9b8829032 100644 --- a/linear/GaussianFactorGraph.h +++ b/linear/GaussianFactorGraph.h @@ -5,10 +5,6 @@ * @author Christian Potthast * @author Alireza Fathi */ - -// $Id: GaussianFactorGraph.h,v 1.24 2009/08/14 20:48:51 acunning Exp $ - -// \callgraph #pragma once @@ -24,7 +20,7 @@ namespace gtsam { /** * A Linear Factor Graph is a factor graph where all factors are Gaussian, i.e. * Factor == GaussianFactor - * VectorConfig = A configuration of vectors + * VectorValues = A values structure of vectors * Most of the time, linear factor graphs arise by linearizing a non-linear factor graph. */ class GaussianFactorGraph : public FactorGraph { @@ -86,38 +82,38 @@ namespace gtsam { void permuteWithInverse(const Permutation& inversePermutation); /** return A*x-b */ - Errors errors(const VectorConfig& x) const; + Errors errors(const VectorValues& x) const; /** shared pointer version */ - boost::shared_ptr errors_(const VectorConfig& x) const; + boost::shared_ptr errors_(const VectorValues& x) const; /** unnormalized error */ - double error(const VectorConfig& x) const; + double error(const VectorValues& x) const; /** return A*x */ - Errors operator*(const VectorConfig& x) const; + Errors operator*(const VectorValues& x) const; /* In-place version e <- A*x that overwrites e. 
*/ - void multiplyInPlace(const VectorConfig& x, Errors& e) const; + void multiplyInPlace(const VectorValues& x, Errors& e) const; /* In-place version e <- A*x that takes an iterator. */ - void multiplyInPlace(const VectorConfig& x, const Errors::iterator& e) const; + void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const; // /** return A^e */ -// VectorConfig operator^(const Errors& e) const; +// VectorValues operator^(const Errors& e) const; /** x += alpha*A'*e */ - void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& x) const; + void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& x) const; // /** // * Calculate Gradient of A^(A*x-b) for a given config -// * @param x: VectorConfig specifying where to calculate gradient -// * @return gradient, as a VectorConfig as well +// * @param x: VectorValues specifying where to calculate gradient +// * @return gradient, as a VectorValues as well // */ -// VectorConfig gradient(const VectorConfig& x) const; +// VectorValues gradient(const VectorValues& x) const; /** Unnormalized probability. 
O(n) */ - double probPrime(const VectorConfig& c) const { + double probPrime(const VectorValues& c) const { return exp(-0.5 * error(c)); } @@ -145,19 +141,19 @@ namespace gtsam { // * @param enableJoinFactor uses the older joint factor combine process when true, // * and when false uses the newer single matrix combine // */ -// VectorConfig optimize(const Ordering& ordering, bool enableJoinFactor = true); +// VectorValues optimize(const Ordering& ordering, bool enableJoinFactor = true); // /** // * optimize a linear factor graph using a multi-frontal solver // * @param ordering fg in order // */ -// VectorConfig optimizeMultiFrontals(const Ordering& ordering); +// VectorValues optimizeMultiFrontals(const Ordering& ordering); // /** // * shared pointer versions for MATLAB // */ // boost::shared_ptr eliminate_(const Ordering&); -// boost::shared_ptr optimize_(const Ordering&); +// boost::shared_ptr optimize_(const Ordering&); /** * static function that combines two factor graphs @@ -207,7 +203,7 @@ namespace gtsam { // * @param v: the source vector // * @param ordeirng: the ordering corresponding to the vector // */ -// VectorConfig assembleConfig(const Vector& v, const Ordering& ordering) const; +// VectorValues assembleValues(const Vector& v, const Ordering& ordering) const; // // /** // * get the 1-based starting column indices for all variables @@ -237,32 +233,32 @@ namespace gtsam { // /** // * Find solution using gradient descent -// * @param x0: VectorConfig specifying initial estimate +// * @param x0: VectorValues specifying initial estimate // * @return solution // */ -// VectorConfig steepestDescent(const VectorConfig& x0, bool verbose = false, +// VectorValues steepestDescent(const VectorValues& x0, bool verbose = false, // double epsilon = 1e-3, size_t maxIterations = 0) const; // // /** // * shared pointer versions for MATLAB // */ -// boost::shared_ptr -// steepestDescent_(const VectorConfig& x0, bool verbose = false, +// boost::shared_ptr +// 
steepestDescent_(const VectorValues& x0, bool verbose = false, // double epsilon = 1e-3, size_t maxIterations = 0) const; // // /** // * Find solution using conjugate gradient descent -// * @param x0: VectorConfig specifying initial estimate +// * @param x0: VectorValues specifying initial estimate // * @return solution // */ -// VectorConfig conjugateGradientDescent(const VectorConfig& x0, bool verbose = +// VectorValues conjugateGradientDescent(const VectorValues& x0, bool verbose = // false, double epsilon = 1e-3, size_t maxIterations = 0) const; // // /** // * shared pointer versions for MATLAB // */ -// boost::shared_ptr conjugateGradientDescent_( -// const VectorConfig& x0, bool verbose = false, double epsilon = 1e-3, +// boost::shared_ptr conjugateGradientDescent_( +// const VectorValues& x0, bool verbose = false, double epsilon = 1e-3, // size_t maxIterations = 0) const; }; diff --git a/linear/GaussianISAM.cpp b/linear/GaussianISAM.cpp index 07c51efcb..ce0e6f3ab 100644 --- a/linear/GaussianISAM.cpp +++ b/linear/GaussianISAM.cpp @@ -16,7 +16,7 @@ template class ISAM; namespace gtsam { /* ************************************************************************* */ -void optimize(const GaussianISAM::sharedClique& clique, VectorConfig& result) { +void optimize(const GaussianISAM::sharedClique& clique, VectorValues& result) { // parents are assumed to already be solved and available in result GaussianISAM::Clique::const_reverse_iterator it; for (it = clique->rbegin(); it!=clique->rend(); it++) { @@ -32,8 +32,8 @@ void optimize(const GaussianISAM::sharedClique& clique, VectorConfig& result) { } /* ************************************************************************* */ -VectorConfig optimize(const GaussianISAM& bayesTree) { - VectorConfig result(bayesTree.dims_); +VectorValues optimize(const GaussianISAM& bayesTree) { + VectorValues result(bayesTree.dims_); // starting from the root, call optimize on each conditional optimize(bayesTree.root(), result); return 
result; diff --git a/linear/GaussianISAM.h b/linear/GaussianISAM.h index e42bc5e33..966ad633b 100644 --- a/linear/GaussianISAM.h +++ b/linear/GaussianISAM.h @@ -56,14 +56,14 @@ public: dims_.clear(); } - friend VectorConfig optimize(const GaussianISAM&); + friend VectorValues optimize(const GaussianISAM&); }; // recursively optimize this conditional and all subtrees - void optimize(const GaussianISAM::sharedClique& clique, VectorConfig& result); + void optimize(const GaussianISAM::sharedClique& clique, VectorValues& result); // optimize the BayesTree, starting from the root - VectorConfig optimize(const GaussianISAM& bayesTree); + VectorValues optimize(const GaussianISAM& bayesTree); }/// namespace gtsam diff --git a/linear/GaussianJunctionTree.cpp b/linear/GaussianJunctionTree.cpp index cad2d325b..cf43c8d68 100644 --- a/linear/GaussianJunctionTree.cpp +++ b/linear/GaussianJunctionTree.cpp @@ -25,7 +25,7 @@ namespace gtsam { /** * GaussianJunctionTree */ - void GaussianJunctionTree::btreeBackSubstitute(const boost::shared_ptr& current, VectorConfig& config) const { + void GaussianJunctionTree::btreeBackSubstitute(const boost::shared_ptr& current, VectorValues& config) const { // solve the bayes net in the current node GaussianBayesNet::const_reverse_iterator it = current->rbegin(); for (; it!=current->rend(); ++it) { @@ -52,18 +52,18 @@ namespace gtsam { } /* ************************************************************************* */ - VectorConfig GaussianJunctionTree::optimize() const { + VectorValues GaussianJunctionTree::optimize() const { tic("GJT optimize 1: eliminate"); // eliminate from leaves to the root boost::shared_ptr rootClique(this->eliminate()); toc("GJT optimize 1: eliminate"); // Allocate solution vector - tic("GJT optimize 2: allocate VectorConfig"); + tic("GJT optimize 2: allocate VectorValues"); vector dims(rootClique->back()->key() + 1, 0); countDims(rootClique, dims); - VectorConfig result(dims); - toc("GJT optimize 2: allocate 
VectorConfig"); + VectorValues result(dims); + toc("GJT optimize 2: allocate VectorValues"); // back-substitution tic("GJT optimize 3: back-substitute"); diff --git a/linear/GaussianJunctionTree.h b/linear/GaussianJunctionTree.h index 7f045586a..b99f6ee60 100644 --- a/linear/GaussianJunctionTree.h +++ b/linear/GaussianJunctionTree.h @@ -25,7 +25,7 @@ namespace gtsam { protected: // back-substitute in topological sort order (parents first) - void btreeBackSubstitute(const boost::shared_ptr& current, VectorConfig& config) const; + void btreeBackSubstitute(const boost::shared_ptr& current, VectorValues& config) const; public : @@ -35,7 +35,7 @@ namespace gtsam { GaussianJunctionTree(const GaussianFactorGraph& fg) : Base(fg) {} // optimize the linear graph - VectorConfig optimize() const; + VectorValues optimize() const; }; // GaussianJunctionTree } // namespace gtsam diff --git a/linear/Makefile.am b/linear/Makefile.am index 81e7ea05a..66444521c 100644 --- a/linear/Makefile.am +++ b/linear/Makefile.am @@ -16,7 +16,7 @@ sources += NoiseModel.cpp Errors.cpp check_PROGRAMS += tests/testNoiseModel tests/testErrors # Vector Configurations -headers += VectorConfig.h +headers += VectorValues.h #sources += VectorMap.cpp VectorBTree.cpp #check_PROGRAMS += tests/testVectorMap tests/testVectorBTree @@ -36,7 +36,7 @@ check_PROGRAMS += tests/testGaussianJunctionTree # Timing tests noinst_PROGRAMS = tests/timeGaussianFactor tests/timeFactorOverhead tests/timeSLAMlike -#noinst_PROGRAMS += tests/timeVectorConfig +#noinst_PROGRAMS += tests/timeVectorValues #---------------------------------------------------------------------------------------------------- # Create a libtool library that is not installed diff --git a/linear/SubgraphPreconditioner.cpp b/linear/SubgraphPreconditioner.cpp index 08009a019..80957e9c6 100644 --- a/linear/SubgraphPreconditioner.cpp +++ b/linear/SubgraphPreconditioner.cpp @@ -13,25 +13,25 @@ namespace gtsam { /* 
************************************************************************* */ SubgraphPreconditioner::SubgraphPreconditioner(sharedFG& Ab1, sharedFG& Ab2, - sharedBayesNet& Rc1, sharedConfig& xbar) : + sharedBayesNet& Rc1, sharedValues& xbar) : Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(Ab2_->errors_(*xbar)) { } /* ************************************************************************* */ // x = xbar + inv(R1)*y - VectorConfig SubgraphPreconditioner::x(const VectorConfig& y) const { + VectorValues SubgraphPreconditioner::x(const VectorValues& y) const { #ifdef VECTORBTREE if (!y.cloned(*xbar_)) throw invalid_argument("SubgraphPreconditioner::x: y needs to be cloned from xbar"); #endif - VectorConfig x = y; + VectorValues x = y; backSubstituteInPlace(*Rc1_,x); x += *xbar_; return x; } /* ************************************************************************* */ - double SubgraphPreconditioner::error(const VectorConfig& y) const { + double SubgraphPreconditioner::error(const VectorValues& y) const { Errors e; @@ -42,7 +42,7 @@ namespace gtsam { } // Add A2 contribution - VectorConfig x = this->x(y); + VectorValues x = this->x(y); Errors e2 = Ab2_->errors(x); e.splice(e.end(), e2); @@ -51,18 +51,18 @@ namespace gtsam { /* ************************************************************************* */ // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar), - VectorConfig SubgraphPreconditioner::gradient(const VectorConfig& y) const { - VectorConfig x = this->x(y); // x = inv(R1)*y + VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const { + VectorValues x = this->x(y); // x = inv(R1)*y Errors e2 = Ab2_->errors(x); - VectorConfig gx2 = VectorConfig::zero(y); + VectorValues gx2 = VectorValues::zero(y); Ab2_->transposeMultiplyAdd(1.0,e2,gx2); // A2'*e2; - VectorConfig gy2 = gtsam::backSubstituteTranspose(*Rc1_, gx2); // inv(R1')*gx2 + VectorValues gy2 = gtsam::backSubstituteTranspose(*Rc1_, gx2); // inv(R1')*gx2 return y + gy2; } /* 
************************************************************************* */ // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y] - Errors SubgraphPreconditioner::operator*(const VectorConfig& y) const { + Errors SubgraphPreconditioner::operator*(const VectorValues& y) const { Errors e; @@ -73,7 +73,7 @@ namespace gtsam { } // Add A2 contribution - VectorConfig x = y; // TODO avoid ? + VectorValues x = y; // TODO avoid ? gtsam::backSubstituteInPlace(*Rc1_, x); // x=inv(R1)*y Errors e2 = *Ab2_ * x; // A2*x e.splice(e.end(), e2); @@ -83,7 +83,7 @@ namespace gtsam { /* ************************************************************************* */ // In-place version that overwrites e - void SubgraphPreconditioner::multiplyInPlace(const VectorConfig& y, Errors& e) const { + void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const { Errors::iterator ei = e.begin(); @@ -95,16 +95,16 @@ namespace gtsam { } // Add A2 contribution - VectorConfig x = y; // TODO avoid ? + VectorValues x = y; // TODO avoid ? 
gtsam::backSubstituteInPlace(*Rc1_, x); // x=inv(R1)*y Ab2_->multiplyInPlace(x,ei); // use iterator version } /* ************************************************************************* */ // Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2 - VectorConfig SubgraphPreconditioner::operator^(const Errors& e) const { + VectorValues SubgraphPreconditioner::operator^(const Errors& e) const { - VectorConfig y; + VectorValues y; // Use BayesNet order to remove y contributions in order Errors::const_iterator it = e.begin(); @@ -123,7 +123,7 @@ namespace gtsam { /* ************************************************************************* */ // y += alpha*A'*e void SubgraphPreconditioner::transposeMultiplyAdd - (double alpha, const Errors& e, VectorConfig& y) const { + (double alpha, const Errors& e, VectorValues& y) const { // Use BayesNet order to remove y contributions in order Errors::const_iterator it = e.begin(); @@ -140,7 +140,7 @@ namespace gtsam { /* ************************************************************************* */ // y += alpha*inv(R1')*A2'*e2 void SubgraphPreconditioner::transposeMultiplyAdd2 (double alpha, - Errors::const_iterator it, Errors::const_iterator end, VectorConfig& y) const { + Errors::const_iterator it, Errors::const_iterator end, VectorValues& y) const { // create e2 with what's left of e // TODO can we avoid creating e2 by passing iterator to transposeMultiplyAdd ? 
@@ -149,10 +149,10 @@ namespace gtsam { e2.push_back(*(it++)); // Old code: - // VectorConfig x = *Ab2_ ^ e2; // x = A2'*e2 + // VectorValues x = *Ab2_ ^ e2; // x = A2'*e2 // y += alpha * gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x; // New Code: - VectorConfig x = VectorConfig::zero(y); // x = 0 + VectorValues x = VectorValues::zero(y); // x = 0 Ab2_->transposeMultiplyAdd(1.0,e2,x); // x += A2'*e2 axpy(alpha, gtsam::backSubstituteTranspose(*Rc1_, x), y); // y += alpha*inv(R1')*x } diff --git a/linear/SubgraphPreconditioner.h b/linear/SubgraphPreconditioner.h index 1a020ce3e..1a51abed8 100644 --- a/linear/SubgraphPreconditioner.h +++ b/linear/SubgraphPreconditioner.h @@ -24,13 +24,13 @@ namespace gtsam { public: typedef boost::shared_ptr sharedBayesNet; typedef boost::shared_ptr sharedFG; - typedef boost::shared_ptr sharedConfig; + typedef boost::shared_ptr sharedValues; typedef boost::shared_ptr sharedErrors; private: sharedFG Ab1_, Ab2_; sharedBayesNet Rc1_; - sharedConfig xbar_; + sharedValues xbar_; sharedErrors b2bar_; /** b2 - A2*xbar */ public: @@ -42,7 +42,7 @@ namespace gtsam { * @param Rc1: the Bayes Net R1*x=c1 * @param xbar: the solution to R1*x=c1 */ - SubgraphPreconditioner(sharedFG& Ab1, sharedFG& Ab2, sharedBayesNet& Rc1, sharedConfig& xbar); + SubgraphPreconditioner(sharedFG& Ab1, sharedFG& Ab2, sharedBayesNet& Rc1, sharedValues& xbar); std::pair Ab1(const Ordering& ordering) const { return Ab1_->matrix(ordering); } std::pair Ab2(const Ordering& ordering) const { return Ab2_->matrix(ordering); } @@ -50,34 +50,34 @@ namespace gtsam { Matrix A2(const Ordering& ordering) const { return Ab2_->sparse(Ab1_->columnIndices(ordering)); } Vector b1() const { return Ab1_->rhsVector(); } Vector b2() const { return Ab2_->rhsVector(); } - VectorConfig assembleConfig(const Vector& v, const Ordering& ordering) const { return Ab1_->assembleConfig(v, ordering); } + VectorValues assembleValues(const Vector& v, const Ordering& ordering) const { return 
Ab1_->assembleValues(v, ordering); } /* x = xbar + inv(R1)*y */ - VectorConfig x(const VectorConfig& y) const; + VectorValues x(const VectorValues& y) const; - /* A zero VectorConfig with the structure of xbar */ - VectorConfig zero() const { return VectorConfig::zero(*xbar_);} + /* A zero VectorValues with the structure of xbar */ + VectorValues zero() const { return VectorValues::zero(*xbar_);} /* error, given y */ - double error(const VectorConfig& y) const; + double error(const VectorValues& y) const; /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */ - VectorConfig gradient(const VectorConfig& y) const; + VectorValues gradient(const VectorValues& y) const; /** Apply operator A */ - Errors operator*(const VectorConfig& y) const; + Errors operator*(const VectorValues& y) const; /** Apply operator A in place: needs e allocated already */ - void multiplyInPlace(const VectorConfig& y, Errors& e) const; + void multiplyInPlace(const VectorValues& y, Errors& e) const; /** Apply operator A' */ - VectorConfig operator^(const Errors& e) const; + VectorValues operator^(const Errors& e) const; /** * Add A'*e to y * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2] */ - void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& y) const; + void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& y) const; /** * Add constraint part of the error only, used in both calls above @@ -85,7 +85,7 @@ namespace gtsam { * Takes a range indicating e2 !!!! 
*/ void transposeMultiplyAdd2(double alpha, Errors::const_iterator begin, - Errors::const_iterator end, VectorConfig& y) const; + Errors::const_iterator end, VectorValues& y) const; /** print the object */ void print(const std::string& s = "SubgraphPreconditioner") const; diff --git a/linear/SubgraphSolver-inl.h b/linear/SubgraphSolver-inl.h index 845257d69..ba4e13ea9 100644 --- a/linear/SubgraphSolver-inl.h +++ b/linear/SubgraphSolver-inl.h @@ -20,14 +20,14 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ - template - SubgraphSolver::SubgraphSolver(const Graph& G, const Config& theta0) { + template + SubgraphSolver::SubgraphSolver(const Graph& G, const Values& theta0) { initialize(G,theta0); } /* ************************************************************************* */ - template - void SubgraphSolver::initialize(const Graph& G, const Config& theta0) { + template + void SubgraphSolver::initialize(const Graph& G, const Values& theta0) { // generate spanning tree PredecessorMap tree = G.template findMinimumSpanningTree(); @@ -46,21 +46,21 @@ namespace gtsam { // compose the approximate solution Key root = keys.back(); - theta_bar_ = composePoses (T_, tree, theta0[root]); + theta_bar_ = composePoses (T_, tree, theta0[root]); } /* ************************************************************************* */ - template - boost::shared_ptr SubgraphSolver::linearize(const Graph& G, const Config& theta_bar) const { + template + boost::shared_ptr SubgraphSolver::linearize(const Graph& G, const Values& theta_bar) const { SubgraphPreconditioner::sharedFG Ab1 = T_.linearize(theta_bar); SubgraphPreconditioner::sharedFG Ab2 = C_.linearize(theta_bar); #ifdef TIMING SubgraphPreconditioner::sharedBayesNet Rc1; - SubgraphPreconditioner::sharedConfig xbar; + SubgraphPreconditioner::sharedValues xbar; #else GaussianFactorGraph sacrificialAb1 = *Ab1; // duplicate !!!!! 
SubgraphPreconditioner::sharedBayesNet Rc1 = sacrificialAb1.eliminate_(*ordering_); - SubgraphPreconditioner::sharedConfig xbar = gtsam::optimize_(*Rc1); + SubgraphPreconditioner::sharedValues xbar = gtsam::optimize_(*Rc1); #endif // TODO: there does not seem to be a good reason to have Ab1_ // It seems only be used to provide an ordering for creating sparse matrices @@ -68,14 +68,14 @@ namespace gtsam { } /* ************************************************************************* */ - template - VectorConfig SubgraphSolver::optimize(SubgraphPreconditioner& system) const { - VectorConfig zeros = system.zero(); + template + VectorValues SubgraphSolver::optimize(SubgraphPreconditioner& system) const { + VectorValues zeros = system.zero(); // Solve the subgraph PCG - VectorConfig ybar = conjugateGradients (system, zeros, verbose_, epsilon_, epsilon_abs_, maxIterations_); - VectorConfig xbar = system.x(ybar); + VectorValues xbar = system.x(ybar); return xbar; } diff --git a/linear/SubgraphSolver.h b/linear/SubgraphSolver.h index 8510a6e46..59a8313dc 100644 --- a/linear/SubgraphSolver.h +++ b/linear/SubgraphSolver.h @@ -17,13 +17,13 @@ namespace gtsam { * A nonlinear system solver using subgraph preconditioning conjugate gradient * Concept NonLinearSolver implements * linearize: G * T -> L - * solve : L -> VectorConfig + * solve : L -> VectorValues */ - template + template class SubgraphSolver { private: - typedef typename Config::Key Key; + typedef typename Values::Key Key; typedef typename Graph::Constraint Constraint; typedef typename Graph::Pose Pose; @@ -36,40 +36,40 @@ namespace gtsam { boost::shared_ptr ordering_; /* the solution computed from the first subgraph */ - boost::shared_ptr theta_bar_; + boost::shared_ptr theta_bar_; Graph T_, C_; public: SubgraphSolver() {} - SubgraphSolver(const Graph& G, const Config& theta0); + SubgraphSolver(const Graph& G, const Values& theta0); - void initialize(const Graph& G, const Config& theta0); + void initialize(const 
Graph& G, const Values& theta0); boost::shared_ptr ordering() const { return ordering_; } - boost::shared_ptr theta_bar() const { return theta_bar_; } + boost::shared_ptr theta_bar() const { return theta_bar_; } /** * linearize the non-linear graph around the current config and build the subgraph preconditioner systme */ - boost::shared_ptr linearize(const Graph& G, const Config& theta_bar) const; + boost::shared_ptr linearize(const Graph& G, const Values& theta_bar) const; /** * solve for the optimal displacement in the tangent space, and then solve * the resulted linear system */ - VectorConfig optimize(SubgraphPreconditioner& system) const; + VectorValues optimize(SubgraphPreconditioner& system) const; boost::shared_ptr prepareLinear(const SubgraphPreconditioner& fg) const { return boost::shared_ptr(new SubgraphSolver(*this)); } }; - template const size_t SubgraphSolver::maxIterations_; - template const bool SubgraphSolver::verbose_; - template const double SubgraphSolver::epsilon_; - template const double SubgraphSolver::epsilon_abs_; + template const size_t SubgraphSolver::maxIterations_; + template const bool SubgraphSolver::verbose_; + template const double SubgraphSolver::epsilon_; + template const double SubgraphSolver::epsilon_abs_; } // nsamespace gtsam diff --git a/linear/VectorBTree.cpp b/linear/VectorBTree.cpp index 2208256a8..c526473b8 100644 --- a/linear/VectorBTree.cpp +++ b/linear/VectorBTree.cpp @@ -1,7 +1,7 @@ /** - * @file VectorConfig.cpp - * @brief Factor Graph Configuration - * @brief VectorConfig + * @file VectorValues.cpp + * @brief Factor Graph Values + * @brief VectorValues * @author Frank Dellaert */ @@ -14,7 +14,7 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ - // Check if two VectorConfigs are compatible, throw exception if not + // Check if two VectorValuess are compatible, throw exception if not static void check_compatible(const string& s, const 
VectorBTree& a, const VectorBTree& b) { if (!a.compatible(b)) diff --git a/linear/VectorBTree.h b/linear/VectorBTree.h index f53aaace1..b8b7f09e2 100644 --- a/linear/VectorBTree.h +++ b/linear/VectorBTree.h @@ -1,6 +1,6 @@ /** * @file VectorBTree.h - * @brief Factor Graph Configuration + * @brief Factor Graph Values * @author Frank Dellaert */ @@ -19,7 +19,7 @@ namespace gtsam { - /** Factor Graph Configuration */ + /** Factor Graph Values */ class VectorBTree: public Testable { private: @@ -74,7 +74,7 @@ public: /** equals, for unit testing */ bool equals(const VectorBTree& expected, double tol = 1e-9) const; - /** Insert a value into the configuration with a given index: O(n) */ + /** Insert a value into the values structure with a given index: O(n) */ VectorBTree& insert(const Symbol& j, const Vector& v); /** Insert or add a value with given index: O(n) if does not exist */ diff --git a/linear/VectorMap.cpp b/linear/VectorMap.cpp index 0e8d8703d..6123c1fd6 100644 --- a/linear/VectorMap.cpp +++ b/linear/VectorMap.cpp @@ -1,6 +1,6 @@ /** * @file VectorMap.cpp - * @brief Factor Graph Configuration + * @brief Factor Graph Values * @brief VectorMap * @author Carlos Nieto * @author Christian Potthast @@ -148,33 +148,33 @@ Vector VectorMap::vector() const { /* ************************************************************************* */ VectorMap expmap(const VectorMap& original, const VectorMap& delta) { - VectorMap newConfig; + VectorMap newValues; varid_t j; Vector vj; // rtodo: copying vector? 
FOREACH_PAIR(j, vj, original.values) { if (delta.contains(j)) { const Vector& dj = delta[j]; check_size(j,vj,dj); - newConfig.insert(j, vj + dj); + newValues.insert(j, vj + dj); } else { - newConfig.insert(j, vj); + newValues.insert(j, vj); } } - return newConfig; + return newValues; } /* ************************************************************************* */ VectorMap expmap(const VectorMap& original, const Vector& delta) { - VectorMap newConfig; + VectorMap newValues; size_t i = 0; varid_t j; Vector vj; // rtodo: copying vector? FOREACH_PAIR(j, vj, original.values) { size_t mj = vj.size(); Vector dj = sub(delta, i, i+mj); - newConfig.insert(j, vj + dj); + newValues.insert(j, vj + dj); i += mj; } - return newConfig; + return newValues; } /* ************************************************************************* */ diff --git a/linear/VectorMap.h b/linear/VectorMap.h index 4be210a8f..2650eb02b 100644 --- a/linear/VectorMap.h +++ b/linear/VectorMap.h @@ -1,6 +1,6 @@ /** * @file VectorMap.h - * @brief Factor Graph Configuration + * @brief Factor Graph Values * @author Carlos Nieto * @author Christian Potthast */ @@ -18,7 +18,7 @@ namespace gtsam { - /** Factor Graph Configuration */ + /** Factor Graph Values */ class VectorMap : public Testable { protected: @@ -41,7 +41,7 @@ namespace gtsam { /** convert into a single large vector */ Vector vector() const; - /** Insert a value into the configuration with a given index */ + /** Insert a value into the values structure with a given index */ VectorMap& insert(varid_t name, const Vector& v); /** Insert or add a value with given index */ diff --git a/linear/VectorConfig.h b/linear/VectorValues.h similarity index 81% rename from linear/VectorConfig.h rename to linear/VectorValues.h index ff0a113a5..0b7725340 100644 --- a/linear/VectorConfig.h +++ b/linear/VectorValues.h @@ -1,6 +1,6 @@ /** - * @file VectorConfig.h - * @brief Factor Graph Configuration + * @file VectorValues.h + * @brief Factor Graph 
Values * @author Richard Roberts */ @@ -19,16 +19,16 @@ namespace gtsam { -class VectorConfig : public Testable { +class VectorValues : public Testable { protected: Vector values_; std::vector varStarts_; public: template class _impl_iterator; // Forward declaration of iterator implementation - typedef boost::shared_ptr shared_ptr; - typedef _impl_iterator iterator; - typedef _impl_iterator const_iterator; + typedef boost::shared_ptr shared_ptr; + typedef _impl_iterator iterator; + typedef _impl_iterator const_iterator; typedef boost::numeric::ublas::vector_range value_reference_type; typedef boost::numeric::ublas::vector_range const_value_reference_type; typedef boost::numeric::ublas::vector_range mapped_type; @@ -38,23 +38,23 @@ public: // * Constructor requires an existing GaussianVariableIndex to get variable // * dimensions. // */ -// VectorConfig(const GaussianVariableIndex& variableIndex); +// VectorValues(const GaussianVariableIndex& variableIndex); /** - * Default constructor creates an empty VectorConfig. reserve(...) must be + * Default constructor creates an empty VectorValues. reserve(...) must be * called to allocate space before any values can be added. This prevents * slow reallocation of space at runtime. */ - VectorConfig() : varStarts_(1,0) {} + VectorValues() : varStarts_(1,0) {} /** Construct from a container of variable dimensions (in variable order). */ template - VectorConfig(const Container& dimensions); + VectorValues(const Container& dimensions); /** Construct from a container of variable dimensions in variable order and * a combined Vector of all of the variables in order. 
*/ - VectorConfig(const std::vector& dimensions, const Vector& values); + VectorValues(const std::vector& dimensions, const Vector& values); /** Element access */ mapped_type operator[](varid_t variable); @@ -72,10 +72,10 @@ public: size_t dimCapacity() const { return values_.size(); } /** Iterator access */ - iterator begin() { return _impl_iterator(*this, 0); } - const_iterator begin() const { return _impl_iterator(*this, 0); } - iterator end() { return _impl_iterator(*this, varStarts_.size()-1); } - const_iterator end() const { return _impl_iterator(*this, varStarts_.size()-1); } + iterator begin() { return _impl_iterator(*this, 0); } + const_iterator begin() const { return _impl_iterator(*this, 0); } + iterator end() { return _impl_iterator(*this, varStarts_.size()-1); } + const_iterator end() const { return _impl_iterator(*this, varStarts_.size()-1); } /** Reserve space for a total number of variables and dimensionality */ void reserve(varid_t nVars, size_t totalDims) { values_.resize(std::max(totalDims, values_.size())); varStarts_.reserve(nVars+1); } @@ -95,7 +95,7 @@ public: void makeZero() { boost::numeric::ublas::noalias(values_) = boost::numeric::ublas::zero_vector(values_.size()); } /** print required by Testable for unit testing */ - void print(const std::string& str = "VectorConfig: ") const { + void print(const std::string& str = "VectorValues: ") const { std::cout << str << ": " << varStarts_.size()-1 << " elements\n"; for(varid_t var=0; var& r) { assert(&config_ == &r.config_); } - friend class VectorConfig; + friend class VectorValues; public: - typedef typename const_selector::type value_type; + typedef typename const_selector::type value_type; _impl_iterator& operator++() { ++curVariable_; return *this; } _impl_iterator& operator--() { --curVariable_; return *this; } _impl_iterator& operator++(int) { throw std::runtime_error("Use prefix ++ operator"); } @@ -158,7 +158,7 @@ protected: }; -//inline VectorConfig::VectorConfig(const 
GaussianVariableIndex& variableIndex) : varStarts_(variableIndex.size()+1) { +//inline VectorValues::VectorValues(const GaussianVariableIndex& variableIndex) : varStarts_(variableIndex.size()+1) { // size_t varStart = 0; // varStarts_[0] = 0; // for(varid_t var=0; var -inline VectorConfig::VectorConfig(const Container& dimensions) : varStarts_(dimensions.size()+1) { +inline VectorValues::VectorValues(const Container& dimensions) : varStarts_(dimensions.size()+1) { varStarts_[0] = 0; size_t varStart = 0; varid_t var = 0; @@ -179,7 +179,7 @@ inline VectorConfig::VectorConfig(const Container& dimensions) : varStarts_(dime values_.resize(varStarts_.back(), false); } -inline VectorConfig::VectorConfig(const std::vector& dimensions, const Vector& values) : +inline VectorValues::VectorValues(const std::vector& dimensions, const Vector& values) : values_(values), varStarts_(dimensions.size()+1) { varStarts_[0] = 0; size_t varStart = 0; @@ -190,13 +190,13 @@ inline VectorConfig::VectorConfig(const std::vector& dimensions, const V assert(varStarts_.back() == values.size()); } -inline VectorConfig::mapped_type VectorConfig::operator[](varid_t variable) { +inline VectorValues::mapped_type VectorValues::operator[](varid_t variable) { checkVariable(variable); return boost::numeric::ublas::project(values_, boost::numeric::ublas::range(varStarts_[variable], varStarts_[variable+1])); } -inline VectorConfig::const_mapped_type VectorConfig::operator[](varid_t variable) const { +inline VectorValues::const_mapped_type VectorValues::operator[](varid_t variable) const { checkVariable(variable); return boost::numeric::ublas::project(values_, boost::numeric::ublas::range(varStarts_[variable], varStarts_[variable+1])); diff --git a/linear/iterative.cpp b/linear/iterative.cpp index 8983c0309..28707dbdd 100644 --- a/linear/iterative.cpp +++ b/linear/iterative.cpp @@ -51,15 +51,15 @@ namespace gtsam { } /* ************************************************************************* */ - 
VectorConfig steepestDescent(const GaussianFactorGraph& fg, - const VectorConfig& x, bool verbose, double epsilon, double epsilon_abs, size_t maxIterations) { - return conjugateGradients (fg, + VectorValues steepestDescent(const GaussianFactorGraph& fg, + const VectorValues& x, bool verbose, double epsilon, double epsilon_abs, size_t maxIterations) { + return conjugateGradients (fg, x, verbose, epsilon, epsilon_abs, maxIterations, true); } - VectorConfig conjugateGradientDescent(const GaussianFactorGraph& fg, - const VectorConfig& x, bool verbose, double epsilon, double epsilon_abs, size_t maxIterations) { - return conjugateGradients (fg, + VectorValues conjugateGradientDescent(const GaussianFactorGraph& fg, + const VectorValues& x, bool verbose, double epsilon, double epsilon_abs, size_t maxIterations) { + return conjugateGradients (fg, x, verbose, epsilon, epsilon_abs, maxIterations); } diff --git a/linear/iterative.h b/linear/iterative.h index ac2497202..2a446e7ec 100644 --- a/linear/iterative.h +++ b/linear/iterative.h @@ -8,7 +8,7 @@ #pragma once #include -#include +#include namespace gtsam { @@ -107,15 +107,15 @@ namespace gtsam { /** * Method of steepest gradients, Gaussian Factor Graph version * */ - VectorConfig steepestDescent(const GaussianFactorGraph& fg, - const VectorConfig& x, bool verbose = false, double epsilon = 1e-3, + VectorValues steepestDescent(const GaussianFactorGraph& fg, + const VectorValues& x, bool verbose = false, double epsilon = 1e-3, double epsilon_abs = 1e-5, size_t maxIterations = 0); /** * Method of conjugate gradients (CG), Gaussian Factor Graph version * */ - VectorConfig conjugateGradientDescent(const GaussianFactorGraph& fg, - const VectorConfig& x, bool verbose = false, double epsilon = 1e-3, + VectorValues conjugateGradientDescent(const GaussianFactorGraph& fg, + const VectorValues& x, bool verbose = false, double epsilon = 1e-3, double epsilon_abs = 1e-5, size_t maxIterations = 0); } // namespace gtsam diff --git 
a/linear/tests/testBayesNetPreconditioner.cpp b/linear/tests/testBayesNetPreconditioner.cpp index 60989e7e5..fb59709f5 100644 --- a/linear/tests/testBayesNetPreconditioner.cpp +++ b/linear/tests/testBayesNetPreconditioner.cpp @@ -40,23 +40,23 @@ TEST( BayesNetPreconditioner, operators ) BayesNetPreconditioner P(dummy,bn); // inv(R1)*d should equal solution [1;-2;1] - VectorConfig D; + VectorValues D; D.insert("x", d); D.insert("y", Vector_(1, 1.0 / 0.1)); // corrected by sigma - VectorConfig expected1; + VectorValues expected1; expected1.insert("x", Vector_(2, 1.0, -2.0)); expected1.insert("y", Vector_(1, 1.0)); - VectorConfig actual1 = P.backSubstitute(D); + VectorValues actual1 = P.backSubstitute(D); CHECK(assert_equal(expected1,actual1)); // inv(R1')*ones should equal ? - VectorConfig ones; + VectorValues ones; ones.insert("x", Vector_(2, 1.0, 1.0)); ones.insert("y", Vector_(1, 1.0)); - VectorConfig expected2; + VectorValues expected2; expected2.insert("x", Vector_(2, 0.1, -0.1)); expected2.insert("y", Vector_(1, 0.0)); - VectorConfig actual2 = P.backSubstituteTranspose(ones); + VectorValues actual2 = P.backSubstituteTranspose(ones); CHECK(assert_equal(expected2,actual2)); } diff --git a/linear/tests/testGaussianConditional.cpp b/linear/tests/testGaussianConditional.cpp index 58e50e8c7..6203d16bd 100644 --- a/linear/tests/testGaussianConditional.cpp +++ b/linear/tests/testGaussianConditional.cpp @@ -145,7 +145,7 @@ TEST( GaussianConditional, solve ) Vector sl1(2); sl1(0) = 1.0; sl1(1) = 1.0; - VectorConfig solution(vector(3, 2)); + VectorValues solution(vector(3, 2)); solution[_x1_] = sx1; solution[_l1_] = sl1; diff --git a/linear/tests/testGaussianFactor.cpp b/linear/tests/testGaussianFactor.cpp index 95af66841..a84fda930 100644 --- a/linear/tests/testGaussianFactor.cpp +++ b/linear/tests/testGaussianFactor.cpp @@ -180,7 +180,7 @@ TEST(GaussianFactor, Combine2) // Vector b = Vector_(2,0.2,-0.1); // GaussianFactor lf(_x1_, -I, _x2_, I, b, sigma0_1); // -// 
VectorConfig c; +// VectorValues c; // c.insert(_x1_,Vector_(2,10.,20.)); // c.insert(_x2_,Vector_(2,30.,60.)); // @@ -189,16 +189,16 @@ TEST(GaussianFactor, Combine2) // CHECK(assert_equal(expectedE,e)); // // // test A^e -// VectorConfig expectedX; +// VectorValues expectedX; // expectedX.insert(_x1_,Vector_(2,-2000.,-4000.)); // expectedX.insert(_x2_,Vector_(2, 2000., 4000.)); // CHECK(assert_equal(expectedX,lf^e)); // // // test transposeMultiplyAdd -// VectorConfig x; +// VectorValues x; // x.insert(_x1_,Vector_(2, 1.,2.)); // x.insert(_x2_,Vector_(2, 3.,4.)); -// VectorConfig expectedX2 = x + 0.1 * (lf^e); +// VectorValues expectedX2 = x + 0.1 * (lf^e); // lf.transposeMultiplyAdd(0.1,e,x); // CHECK(assert_equal(expectedX2,x)); //} @@ -514,7 +514,7 @@ TEST( GaussianFactor, default_error ) { GaussianFactor f; vector dims; - VectorConfig c(dims); + VectorValues c(dims); double actual = f.error(c); CHECK(actual==0.0); } diff --git a/linear/tests/testGaussianJunctionTree.cpp b/linear/tests/testGaussianJunctionTree.cpp index 2308447d1..f575fbbff 100644 --- a/linear/tests/testGaussianJunctionTree.cpp +++ b/linear/tests/testGaussianJunctionTree.cpp @@ -78,8 +78,8 @@ TEST( GaussianJunctionTree, optimizeMultiFrontal ) GaussianFactorGraph fg = createChain(); GaussianJunctionTree tree(fg); - VectorConfig actual = tree.optimize(); - VectorConfig expected(vector(4,1)); + VectorValues actual = tree.optimize(); + VectorValues expected(vector(4,1)); expected[x1] = Vector_(1, 0.); expected[x2] = Vector_(1, 1.); expected[x3] = Vector_(1, 0.); diff --git a/linear/tests/testVectorBTree.cpp b/linear/tests/testVectorBTree.cpp index 47feaf745..2e4e080b3 100644 --- a/linear/tests/testVectorBTree.cpp +++ b/linear/tests/testVectorBTree.cpp @@ -1,6 +1,6 @@ /** * @file testVectorBTree.cpp - * @brief Unit tests for Factor Graph Configuration + * @brief Unit tests for Factor Graph Values * @author Frank Dellaert **/ @@ -283,7 +283,7 @@ TEST( VectorBTree, serialize) cout << "VectorBTree: 
Running Serialization Test" << endl; //create an VectorBTree - VectorBTree fg = createConfig(); + VectorBTree fg = createValues(); //serialize the config std::ostringstream in_archive_stream; diff --git a/linear/tests/testVectorConfig.cpp b/linear/tests/testVectorConfig.cpp index ad16f3df0..4f9443436 100644 --- a/linear/tests/testVectorConfig.cpp +++ b/linear/tests/testVectorConfig.cpp @@ -1,5 +1,5 @@ /** - * @file testVectorConfig.cpp + * @file testVectorValues.cpp * @brief * @author Richard Roberts * @created Sep 16, 2010 @@ -7,7 +7,7 @@ #include -#include +#include #include #include @@ -15,13 +15,13 @@ using namespace gtsam; using namespace std; -TEST(VectorConfig, standard) { +TEST(VectorValues, standard) { Vector v1 = Vector_(3, 1.0,2.0,3.0); Vector v2 = Vector_(2, 4.0,5.0); Vector v3 = Vector_(4, 6.0,7.0,8.0,9.0); vector dims(3); dims[0]=3; dims[1]=2; dims[2]=4; - VectorConfig combined(dims); + VectorValues combined(dims); combined[0] = v1; combined[1] = v2; combined[2] = v3; @@ -30,7 +30,7 @@ TEST(VectorConfig, standard) { CHECK(assert_equal(combined[1], v2)) CHECK(assert_equal(combined[2], v3)) - VectorConfig incremental; + VectorValues incremental; incremental.reserve(3, 9); incremental.push_back_preallocated(v1); incremental.push_back_preallocated(v2); @@ -41,13 +41,13 @@ TEST(VectorConfig, standard) { CHECK(assert_equal(incremental[2], v3)) } -TEST(VectorConfig, permuted_combined) { +TEST(VectorValues, permuted_combined) { Vector v1 = Vector_(3, 1.0,2.0,3.0); Vector v2 = Vector_(2, 4.0,5.0); Vector v3 = Vector_(4, 6.0,7.0,8.0,9.0); vector dims(3); dims[0]=3; dims[1]=2; dims[2]=4; - VectorConfig combined(dims); + VectorValues combined(dims); combined[0] = v1; combined[1] = v2; combined[2] = v3; @@ -62,7 +62,7 @@ TEST(VectorConfig, permuted_combined) { perm2[1] = 2; perm2[2] = 0; - Permuted permuted1(combined); + Permuted permuted1(combined); CHECK(assert_equal(v1, permuted1[0])) CHECK(assert_equal(v2, permuted1[1])) CHECK(assert_equal(v3, permuted1[2])) 
@@ -77,7 +77,7 @@ TEST(VectorConfig, permuted_combined) { CHECK(assert_equal(v2, permuted1[2])) CHECK(assert_equal(v3, permuted1[0])) - Permuted permuted2(perm1, combined); + Permuted permuted2(perm1, combined); CHECK(assert_equal(v1, permuted2[2])) CHECK(assert_equal(v2, permuted2[0])) CHECK(assert_equal(v3, permuted2[1])) @@ -89,12 +89,12 @@ TEST(VectorConfig, permuted_combined) { } -TEST(VectorConfig, permuted_incremental) { +TEST(VectorValues, permuted_incremental) { Vector v1 = Vector_(3, 1.0,2.0,3.0); Vector v2 = Vector_(2, 4.0,5.0); Vector v3 = Vector_(4, 6.0,7.0,8.0,9.0); - VectorConfig incremental; + VectorValues incremental; incremental.reserve(3, 9); incremental.push_back_preallocated(v1); incremental.push_back_preallocated(v2); @@ -110,7 +110,7 @@ TEST(VectorConfig, permuted_incremental) { perm2[1] = 2; perm2[2] = 0; - Permuted permuted1(incremental); + Permuted permuted1(incremental); CHECK(assert_equal(v1, permuted1[0])) CHECK(assert_equal(v2, permuted1[1])) CHECK(assert_equal(v3, permuted1[2])) @@ -125,7 +125,7 @@ TEST(VectorConfig, permuted_incremental) { CHECK(assert_equal(v2, permuted1[2])) CHECK(assert_equal(v3, permuted1[0])) - Permuted permuted2(perm1, incremental); + Permuted permuted2(perm1, incremental); CHECK(assert_equal(v1, permuted2[2])) CHECK(assert_equal(v2, permuted2[0])) CHECK(assert_equal(v3, permuted2[1])) diff --git a/linear/tests/testVectorMap.cpp b/linear/tests/testVectorMap.cpp index d9e1ba8d0..5c3edc06a 100644 --- a/linear/tests/testVectorMap.cpp +++ b/linear/tests/testVectorMap.cpp @@ -1,6 +1,6 @@ /** * @file testVectorMap.cpp - * @brief Unit tests for Factor Graph Configuration + * @brief Unit tests for Factor Graph Values * @author Carlos Nieto **/ @@ -221,7 +221,7 @@ TEST( VectorMap, serialize) cout << "VectorMap: Running Serialization Test" << endl; //create an VectorMap - VectorMap fg = createConfig(); + VectorMap fg = createValues(); //serialize the config std::ostringstream in_archive_stream; diff --git 
a/linear/tests/timeFactorOverhead.cpp b/linear/tests/timeFactorOverhead.cpp index 11836f440..ed92f5337 100644 --- a/linear/tests/timeFactorOverhead.cpp +++ b/linear/tests/timeFactorOverhead.cpp @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) { for(size_t trial=0; trial #include -#include +#include #include #include #include @@ -20,9 +20,9 @@ #define INSTANTIATE_LIE_CONFIG(J) \ /*INSTANTIATE_LIE(T);*/ \ - /*template LieValues expmap(const LieValues&, const VectorConfig&);*/ \ + /*template LieValues expmap(const LieValues&, const VectorValues&);*/ \ /*template LieValues expmap(const LieValues&, const Vector&);*/ \ - /*template VectorConfig logmap(const LieValues&, const LieValues&);*/ \ + /*template VectorValues logmap(const LieValues&, const LieValues&);*/ \ template class LieValues; using namespace std; @@ -69,8 +69,8 @@ namespace gtsam { /* ************************************************************************* */ template - VectorConfig LieValues::zero(const Ordering& ordering) const { - VectorConfig z(this->dims(ordering)); + VectorValues LieValues::zero(const Ordering& ordering) const { + VectorValues z(this->dims(ordering)); z.makeZero(); return z; } @@ -131,22 +131,22 @@ namespace gtsam { /* ************************************************************************* */ // todo: insert for every element is inefficient template - LieValues LieValues::expmap(const VectorConfig& delta, const Ordering& ordering) const { - LieValues newConfig; + LieValues LieValues::expmap(const VectorValues& delta, const Ordering& ordering) const { + LieValues newValues; typedef pair KeyValue; BOOST_FOREACH(const KeyValue& value, this->values_) { const J& j = value.first; const typename J::Value_t& pj = value.second; const Vector& dj = delta[ordering[j]]; - newConfig.insert(j, pj.expmap(dj)); + newValues.insert(j, pj.expmap(dj)); } - return newConfig; + return newValues; } /* ************************************************************************* */ template std::vector 
LieValues::dims(const Ordering& ordering) const { - _ConfigDimensionCollector dimCollector(ordering); + _ValuesDimensionCollector dimCollector(ordering); this->apply(dimCollector); return dimCollector.dimensions; } @@ -155,7 +155,7 @@ namespace gtsam { template Ordering::shared_ptr LieValues::orderingArbitrary(varid_t firstVar) const { // Generate an initial key ordering in iterator order - _ConfigKeyOrderer keyOrderer(firstVar); + _ValuesKeyOrderer keyOrderer(firstVar); this->apply(keyOrderer); return keyOrderer.ordering; } @@ -168,32 +168,32 @@ namespace gtsam { // cout << "LieValues::dim (" << dim() << ") <> delta.size (" << delta.size() << ")" << endl; // throw invalid_argument("Delta vector length does not match config dimensionality."); // } -// LieValues newConfig; +// LieValues newValues; // int delta_offset = 0; // typedef pair KeyValue; // BOOST_FOREACH(const KeyValue& value, this->values_) { // const J& j = value.first; // const typename J::Value_t& pj = value.second; // int cur_dim = pj.dim(); -// newConfig.insert(j,pj.expmap(sub(delta, delta_offset, delta_offset+cur_dim))); +// newValues.insert(j,pj.expmap(sub(delta, delta_offset, delta_offset+cur_dim))); // delta_offset += cur_dim; // } -// return newConfig; +// return newValues; // } /* ************************************************************************* */ // todo: insert for every element is inefficient // todo: currently only logmaps elements in both configs template - inline VectorConfig LieValues::logmap(const LieValues& cp, const Ordering& ordering) const { - VectorConfig delta(this->dims(ordering)); + inline VectorValues LieValues::logmap(const LieValues& cp, const Ordering& ordering) const { + VectorValues delta(this->dims(ordering)); logmap(cp, ordering, delta); return delta; } /* ************************************************************************* */ template - void LieValues::logmap(const LieValues& cp, const Ordering& ordering, VectorConfig& delta) const { + void 
LieValues::logmap(const LieValues& cp, const Ordering& ordering, VectorValues& delta) const { typedef pair KeyValue; BOOST_FOREACH(const KeyValue& value, cp) { assert(this->exists(value.first)); diff --git a/nonlinear/LieValues.h b/nonlinear/LieValues.h index 48ca2452a..dded5413b 100644 --- a/nonlinear/LieValues.h +++ b/nonlinear/LieValues.h @@ -5,8 +5,8 @@ * @brief A templated config for Lie-group elements * * Detailed story: - * A configuration is a map from keys to values. It is used to specify the value of a bunch - * of variables in a factor graph. A LieValues is a configuration which can hold variables that + * A values structure is a map from keys to values. It is used to specify the value of a bunch + * of variables in a factor graph. A LieValues is a values structure which can hold variables that * are elements of Lie groups, not just vectors. It then, as a whole, implements a aggregate type * which is also a Lie group, and hence supports operations dim, expmap, and logmap. */ @@ -23,14 +23,14 @@ #include namespace boost { template class optional; } -namespace gtsam { class VectorConfig; class Ordering; } +namespace gtsam { class VectorValues; class Ordering; } namespace gtsam { /** - * Lie type configuration + * Lie type values structure * Takes two template types - * J: a key type to look up values in the configuration (need to be sortable) + * J: a key type to look up values in the values structure (need to be sortable) * * Key concept: * The key will be assumed to be sortable, and must have a @@ -95,8 +95,8 @@ namespace gtsam { /** The dimensionality of the tangent space */ size_t dim() const; - /** Get a zero VectorConfig of the correct structure */ - VectorConfig zero(const Ordering& ordering) const; + /** Get a zero VectorValues of the correct structure */ + VectorValues zero(const Ordering& ordering) const; const_iterator begin() const { return values_.begin(); } const_iterator end() const { return values_.end(); } @@ -106,16 +106,16 @@ namespace 
gtsam { // Lie operations /** Add a delta config to current config and returns a new config */ - LieValues expmap(const VectorConfig& delta, const Ordering& ordering) const; + LieValues expmap(const VectorValues& delta, const Ordering& ordering) const; // /** Add a delta vector to current config and returns a new config, uses iterator order */ // LieValues expmap(const Vector& delta) const; /** Get a delta config about a linearization point c0 (*this) */ - VectorConfig logmap(const LieValues& cp, const Ordering& ordering) const; + VectorValues logmap(const LieValues& cp, const Ordering& ordering) const; /** Get a delta config about a linearization point c0 (*this) */ - void logmap(const LieValues& cp, const Ordering& ordering, VectorConfig& delta) const; + void logmap(const LieValues& cp, const Ordering& ordering, VectorValues& delta) const; // imperative methods: @@ -159,7 +159,7 @@ namespace gtsam { /** * Apply a class with an application operator() to a const_iterator over * every pair. The operator must be able to handle such an - * iterator for every type in the Config, (i.e. through templating). + * iterator for every type in the Values, (i.e. through templating). 
*/ template void apply(A& operation) { @@ -193,10 +193,10 @@ namespace gtsam { }; - struct _ConfigDimensionCollector { + struct _ValuesDimensionCollector { const Ordering& ordering; std::vector dimensions; - _ConfigDimensionCollector(const Ordering& _ordering) : ordering(_ordering), dimensions(_ordering.nVars()) {} + _ValuesDimensionCollector(const Ordering& _ordering) : ordering(_ordering), dimensions(_ordering.nVars()) {} template void operator()(const I& key_value) { varid_t var = ordering[key_value->first]; assert(var < dimensions.size()); @@ -205,10 +205,10 @@ namespace gtsam { }; /* ************************************************************************* */ - struct _ConfigKeyOrderer { + struct _ValuesKeyOrderer { varid_t var; Ordering::shared_ptr ordering; - _ConfigKeyOrderer(varid_t firstVar) : var(firstVar), ordering(new Ordering) {} + _ValuesKeyOrderer(varid_t firstVar) : var(firstVar), ordering(new Ordering) {} template void operator()(const I& key_value) { ordering->insert(key_value->first, var); ++ var; diff --git a/nonlinear/NonlinearConstraint.h b/nonlinear/NonlinearConstraint.h index 1ad52e07c..5c3a3fec9 100644 --- a/nonlinear/NonlinearConstraint.h +++ b/nonlinear/NonlinearConstraint.h @@ -27,12 +27,12 @@ namespace gtsam { * on the constraint function that should be made high enough to be * significant */ -template -class NonlinearConstraint : public NonlinearFactor { +template +class NonlinearConstraint : public NonlinearFactor { protected: - typedef NonlinearConstraint This; - typedef NonlinearFactor Base; + typedef NonlinearConstraint This; + typedef NonlinearFactor Base; double mu_; /// gain for quadratic merit function size_t dim_; /// dimension of the constraint @@ -57,14 +57,14 @@ public: size_t dim() const { return dim_; } /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& f, double tol=1e-9) const { + virtual bool equals(const NonlinearFactor& f, double tol=1e-9) const { const This* p = dynamic_cast (&f); if 
(p == NULL) return false; return Base::equals(*p, tol) && (mu_ == p->mu_); } /** error function - returns the quadratic merit function */ - virtual double error(const Config& c) const { + virtual double error(const Values& c) const { const Vector error_vector = unwhitenedError(c); if (active(c)) return mu_ * inner_prod(error_vector, error_vector); @@ -72,7 +72,7 @@ public: } /** Raw error vector function g(x) */ - virtual Vector unwhitenedError(const Config& c) const = 0; + virtual Vector unwhitenedError(const Values& c) const = 0; /** * active set check, defines what type of constraint this is @@ -81,29 +81,29 @@ public: * when the constraint is *NOT* fulfilled. * @return true if the constraint is active */ - virtual bool active(const Config& c) const=0; + virtual bool active(const Values& c) const=0; /** * Linearizes around a given config - * @param config is the configuration + * @param config is the values structure * @return a combined linear factor containing both the constraint and the constraint factor */ - virtual boost::shared_ptr linearize(const Config& c) const=0; + virtual boost::shared_ptr linearize(const Values& c) const=0; }; /** * A unary constraint that defaults to an equality constraint */ -template -class NonlinearConstraint1 : public NonlinearConstraint { +template +class NonlinearConstraint1 : public NonlinearConstraint { public: typedef typename Key::Value_t X; protected: - typedef NonlinearConstraint1 This; - typedef NonlinearConstraint Base; + typedef NonlinearConstraint1 This; + typedef NonlinearConstraint Base; /** key for the constrained variable */ Key key_; @@ -130,14 +130,14 @@ public: } /** Check if two factors are equal. Note type is Factor and needs cast. 
*/ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { const This* p = dynamic_cast (&f); if (p == NULL) return false; return Base::equals(*p, tol) && (key_ == p->key_); } /** error function g(x), switched depending on whether the constraint is active */ - inline Vector unwhitenedError(const Config& x) const { + inline Vector unwhitenedError(const Values& x) const { if (!active(x)) { return zero(this->dim()); } @@ -147,7 +147,7 @@ public: } /** Linearize from config */ - boost::shared_ptr linearize(const Config& c, const Ordering& ordering) const { + boost::shared_ptr linearize(const Values& c, const Ordering& ordering) const { if (!active(c)) { boost::shared_ptr factor; return factor; @@ -168,15 +168,15 @@ public: /** * Unary Equality constraint - simply forces the value of active() to true */ -template -class NonlinearEqualityConstraint1 : public NonlinearConstraint1 { +template +class NonlinearEqualityConstraint1 : public NonlinearConstraint1 { public: typedef typename Key::Value_t X; protected: - typedef NonlinearEqualityConstraint1 This; - typedef NonlinearConstraint1 Base; + typedef NonlinearEqualityConstraint1 This; + typedef NonlinearConstraint1 Base; public: NonlinearEqualityConstraint1(const Key& key, size_t dim, double mu = 1000.0) @@ -184,22 +184,22 @@ public: virtual ~NonlinearEqualityConstraint1() {} /** Always active, so fixed value for active() */ - virtual bool active(const Config& c) const { return true; } + virtual bool active(const Values& c) const { return true; } }; /** * A binary constraint with arbitrary cost and jacobian functions */ -template -class NonlinearConstraint2 : public NonlinearConstraint { +template +class NonlinearConstraint2 : public NonlinearConstraint { public: typedef typename Key1::Value_t X1; typedef typename Key2::Value_t X2; protected: - typedef NonlinearConstraint2 This; - typedef NonlinearConstraint Base; + typedef 
NonlinearConstraint2 This; + typedef NonlinearConstraint Base; /** keys for the constrained variables */ Key1 key1_; @@ -230,14 +230,14 @@ public: } /** Check if two factors are equal. Note type is Factor and needs cast. */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { const This* p = dynamic_cast (&f); if (p == NULL) return false; return Base::equals(*p, tol) && (key1_ == p->key1_) && (key2_ == p->key2_); } /** error function g(x), switched depending on whether the constraint is active */ - inline Vector unwhitenedError(const Config& x) const { + inline Vector unwhitenedError(const Values& x) const { if (!active(x)) { return zero(this->dim()); } @@ -249,7 +249,7 @@ public: } /** Linearize from config */ - boost::shared_ptr linearize(const Config& c, const Ordering& ordering) const { + boost::shared_ptr linearize(const Values& c, const Ordering& ordering) const { if (!active(c)) { boost::shared_ptr factor; return factor; @@ -271,16 +271,16 @@ public: /** * Binary Equality constraint - simply forces the value of active() to true */ -template -class NonlinearEqualityConstraint2 : public NonlinearConstraint2 { +template +class NonlinearEqualityConstraint2 : public NonlinearConstraint2 { public: typedef typename Key1::Value_t X1; typedef typename Key2::Value_t X2; protected: - typedef NonlinearEqualityConstraint2 This; - typedef NonlinearConstraint2 Base; + typedef NonlinearEqualityConstraint2 This; + typedef NonlinearConstraint2 Base; public: NonlinearEqualityConstraint2(const Key1& key1, const Key2& key2, size_t dim, double mu = 1000.0) @@ -289,14 +289,14 @@ public: /** Always active, so fixed value for active() */ - virtual bool active(const Config& c) const { return true; } + virtual bool active(const Values& c) const { return true; } }; /** * A ternary constraint */ -template -class NonlinearConstraint3 : public NonlinearConstraint { +template +class 
NonlinearConstraint3 : public NonlinearConstraint { public: typedef typename Key1::Value_t X1; @@ -304,8 +304,8 @@ public: typedef typename Key3::Value_t X3; protected: - typedef NonlinearConstraint3 This; - typedef NonlinearConstraint Base; + typedef NonlinearConstraint3 This; + typedef NonlinearConstraint Base; /** keys for the constrained variables */ Key1 key1_; @@ -341,14 +341,14 @@ public: } /** Check if two factors are equal. Note type is Factor and needs cast. */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { const This* p = dynamic_cast (&f); if (p == NULL) return false; return Base::equals(*p, tol) && (key1_ == p->key1_) && (key2_ == p->key2_) && (key3_ == p->key3_); } /** error function g(x), switched depending on whether the constraint is active */ - inline Vector unwhitenedError(const Config& x) const { + inline Vector unwhitenedError(const Values& x) const { if (!active(x)) { return zero(this->dim()); } @@ -362,7 +362,7 @@ public: } /** Linearize from config */ - boost::shared_ptr linearize(const Config& c) const { + boost::shared_ptr linearize(const Values& c) const { if (!active(c)) { boost::shared_ptr factor; return factor; @@ -385,8 +385,8 @@ public: /** * Ternary Equality constraint - simply forces the value of active() to true */ -template -class NonlinearEqualityConstraint3 : public NonlinearConstraint3 { +template +class NonlinearEqualityConstraint3 : public NonlinearConstraint3 { public: typedef typename Key1::Value_t X1; @@ -394,8 +394,8 @@ public: typedef typename Key3::Value_t X3; protected: - typedef NonlinearEqualityConstraint3 This; - typedef NonlinearConstraint3 Base; + typedef NonlinearEqualityConstraint3 This; + typedef NonlinearConstraint3 Base; public: NonlinearEqualityConstraint3(const Key1& key1, const Key2& key2, const Key3& key3, @@ -404,27 +404,27 @@ public: virtual ~NonlinearEqualityConstraint3() {} /** Always active, so 
fixed value for active() */ - virtual bool active(const Config& c) const { return true; } + virtual bool active(const Values& c) const { return true; } }; /** * Simple unary equality constraint - fixes a value for a variable */ -template -class NonlinearEquality1 : public NonlinearEqualityConstraint1 { +template +class NonlinearEquality1 : public NonlinearEqualityConstraint1 { public: typedef typename Key::Value_t X; protected: - typedef NonlinearEqualityConstraint1 Base; + typedef NonlinearEqualityConstraint1 Base; X value_; /// fixed value for variable public: - typedef boost::shared_ptr > shared_ptr; + typedef boost::shared_ptr > shared_ptr; NonlinearEquality1(const X& value, const Key& key1, double mu = 1000.0) : Base(key1, X::Dim(), mu), value_(value) {} @@ -443,17 +443,17 @@ public: * Simple binary equality constraint - this constraint forces two factors to * be the same. This constraint requires the underlying type to a Lie type */ -template -class NonlinearEquality2 : public NonlinearEqualityConstraint2 { +template +class NonlinearEquality2 : public NonlinearEqualityConstraint2 { public: typedef typename Key::Value_t X; protected: - typedef NonlinearEqualityConstraint2 Base; + typedef NonlinearEqualityConstraint2 Base; public: - typedef boost::shared_ptr > shared_ptr; + typedef boost::shared_ptr > shared_ptr; NonlinearEquality2(const Key& key1, const Key& key2, double mu = 1000.0) : Base(key1, key2, X::Dim(), mu) {} diff --git a/nonlinear/NonlinearEquality.h b/nonlinear/NonlinearEquality.h index e09a4af72..66048a82d 100644 --- a/nonlinear/NonlinearEquality.h +++ b/nonlinear/NonlinearEquality.h @@ -30,8 +30,8 @@ namespace gtsam { * - ALLLOW_ERROR : if we allow that there can be nonzero error, does not throw, and uses gain * - ONLY_EXACT : throws error at linearization if not at exact feasible point, and infinite error */ - template - class NonlinearEquality: public NonlinearFactor1 { + template + class NonlinearEquality: public NonlinearFactor1 { public: 
typedef typename Key::Value_t T; @@ -54,7 +54,7 @@ namespace gtsam { */ bool (*compare_)(const T& a, const T& b); - typedef NonlinearFactor1 Base; + typedef NonlinearFactor1 Base; /** * Constructor - forces exact evaluation @@ -81,13 +81,13 @@ namespace gtsam { } /** Check if two factors are equal */ - bool equals(const NonlinearEquality& f, double tol = 1e-9) const { + bool equals(const NonlinearEquality& f, double tol = 1e-9) const { if (!Base::equals(f)) return false; return compare_(feasible_, f.feasible_); } /** actual error function calculation */ - virtual double error(const Config& c) const { + virtual double error(const Values& c) const { const T& xj = c[this->key_]; Vector e = this->unwhitenedError(c); if (allow_error_ || !compare_(xj, feasible_)) { @@ -114,7 +114,7 @@ namespace gtsam { } // Linearize is over-written, because base linearization tries to whiten - virtual boost::shared_ptr linearize(const Config& x, const Ordering& ordering) const { + virtual boost::shared_ptr linearize(const Values& x, const Ordering& ordering) const { const T& xj = x[this->key_]; Matrix A; Vector b = evaluateError(xj, A); diff --git a/nonlinear/NonlinearFactor.h b/nonlinear/NonlinearFactor.h index 12f74972d..f6ce52aa5 100644 --- a/nonlinear/NonlinearFactor.h +++ b/nonlinear/NonlinearFactor.h @@ -33,23 +33,23 @@ namespace gtsam { * Nonlinear factor which assumes zero-mean Gaussian noise on the * on a measurement predicted by a non-linear function h. * - * Templated on a configuration type. The configurations are typically + * Templated on a values structure type. The values structures are typically * more general than just vectors, e.g., Rot3 or Pose3, * which are objects in non-linear manifolds (Lie groups). 
*/ - template - class NonlinearFactor: public Testable > { + template + class NonlinearFactor: public Testable > { protected: - typedef NonlinearFactor This; + typedef NonlinearFactor This; SharedGaussian noiseModel_; /** Noise model */ std::list keys_; /** cached keys */ public: - typedef boost::shared_ptr > shared_ptr; + typedef boost::shared_ptr > shared_ptr; /** Default constructor for I/O only */ NonlinearFactor() { @@ -70,7 +70,7 @@ namespace gtsam { } /** Check if two NonlinearFactor objects are equal */ - bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const { return noiseModel_->equals(*f.noiseModel_, tol); } @@ -78,7 +78,7 @@ namespace gtsam { * calculate the error of the factor * Override for systems with unusual noise models */ - virtual double error(const Config& c) const { + virtual double error(const Values& c) const { return 0.5 * noiseModel_->Mahalanobis(unwhitenedError(c)); } @@ -109,16 +109,16 @@ namespace gtsam { } /** Vector of errors, unwhitened ! */ - virtual Vector unwhitenedError(const Config& c) const = 0; + virtual Vector unwhitenedError(const Values& c) const = 0; /** Vector of errors, whitened ! */ - Vector whitenedError(const Config& c) const { + Vector whitenedError(const Values& c) const { return noiseModel_->whiten(unwhitenedError(c)); } /** linearize to a GaussianFactor */ virtual boost::shared_ptr - linearize(const Config& c, const Ordering& ordering) const = 0; + linearize(const Values& c, const Ordering& ordering) const = 0; /** * Create a symbolic factor using the given ordering to determine the @@ -141,13 +141,13 @@ namespace gtsam { /** * A Gaussian nonlinear factor that takes 1 parameter * implementing the density P(z|x) \propto exp -0.5*|z-h(x)|^2_C - * Templated on the parameter type X and the configuration Config + * Templated on the parameter type X and the values structure Values * There is no return type specified for h(x). 
Instead, we require * the derived class implements error_vector(c) = h(x)-z \approx Ax-b * This allows a graph to have factors with measurements of mixed type. */ - template - class NonlinearFactor1: public NonlinearFactor { + template + class NonlinearFactor1: public NonlinearFactor { public: @@ -159,8 +159,8 @@ namespace gtsam { // The value of the key. Not const to allow serialization Key key_; - typedef NonlinearFactor Base; - typedef NonlinearFactor1 This; + typedef NonlinearFactor Base; + typedef NonlinearFactor1 This; public: @@ -175,7 +175,7 @@ namespace gtsam { /** * Constructor * @param z measurement - * @param key by which to look up X value in Config + * @param key by which to look up X value in Values */ NonlinearFactor1(const SharedGaussian& noiseModel, const Key& key1) : @@ -191,12 +191,12 @@ namespace gtsam { } /** Check if two factors are equal. Note type is Factor and needs cast. */ - bool equals(const NonlinearFactor1& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor1& f, double tol = 1e-9) const { return Base::noiseModel_->equals(*f.noiseModel_, tol) && (key_ == f.key_); } /** error function h(x)-z, unwhitened !!! 
*/ - inline Vector unwhitenedError(const Config& x) const { + inline Vector unwhitenedError(const Values& x) const { const Key& j = key_; const X& xj = x[j]; return evaluateError(xj); @@ -207,7 +207,7 @@ namespace gtsam { * Ax-b \approx h(x0+dx)-z = h(x0) + A*dx - z * Hence b = z - h(x0) = - error_vector(x) */ - virtual boost::shared_ptr linearize(const Config& x, const Ordering& ordering) const { + virtual boost::shared_ptr linearize(const Values& x, const Ordering& ordering) const { const X& xj = x[key_]; Matrix A; Vector b = - evaluateError(xj, A); @@ -256,8 +256,8 @@ namespace gtsam { /** * A Gaussian nonlinear factor that takes 2 parameters */ - template - class NonlinearFactor2: public NonlinearFactor { + template + class NonlinearFactor2: public NonlinearFactor { public: @@ -271,8 +271,8 @@ namespace gtsam { Key1 key1_; Key2 key2_; - typedef NonlinearFactor Base; - typedef NonlinearFactor2 This; + typedef NonlinearFactor Base; + typedef NonlinearFactor2 This; public: @@ -303,13 +303,13 @@ namespace gtsam { } /** Check if two factors are equal */ - bool equals(const NonlinearFactor2& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor2& f, double tol = 1e-9) const { return Base::noiseModel_->equals(*f.noiseModel_, tol) && (key1_ == f.key1_) && (key2_ == f.key2_); } /** error function z-h(x1,x2) */ - inline Vector unwhitenedError(const Config& x) const { + inline Vector unwhitenedError(const Values& x) const { const X1& x1 = x[key1_]; const X2& x2 = x[key2_]; return evaluateError(x1, x2); @@ -320,7 +320,7 @@ namespace gtsam { * Ax-b \approx h(x1+dx1,x2+dx2)-z = h(x1,x2) + A2*dx1 + A2*dx2 - z * Hence b = z - h(x1,x2) = - error_vector(x) */ - boost::shared_ptr linearize(const Config& c, const Ordering& ordering) const { + boost::shared_ptr linearize(const Values& c, const Ordering& ordering) const { const X1& x1 = c[key1_]; const X2& x2 = c[key2_]; Matrix A1, A2; @@ -392,8 +392,8 @@ namespace gtsam { /** * A Gaussian nonlinear factor that takes 3 
parameters */ - template - class NonlinearFactor3: public NonlinearFactor { + template + class NonlinearFactor3: public NonlinearFactor { public: @@ -409,8 +409,8 @@ namespace gtsam { Key2 key2_; Key3 key3_; - typedef NonlinearFactor Base; - typedef NonlinearFactor3 This; + typedef NonlinearFactor Base; + typedef NonlinearFactor3 This; public: @@ -443,13 +443,13 @@ namespace gtsam { } /** Check if two factors are equal */ - bool equals(const NonlinearFactor3& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor3& f, double tol = 1e-9) const { return Base::noiseModel_->equals(*f.noiseModel_, tol) && (key1_ == f.key1_) && (key2_ == f.key2_) && (key3_ == f.key3_); } /** error function z-h(x1,x2) */ - inline Vector unwhitenedError(const Config& x) const { + inline Vector unwhitenedError(const Values& x) const { const X1& x1 = x[key1_]; const X2& x2 = x[key2_]; const X3& x3 = x[key3_]; @@ -461,7 +461,7 @@ namespace gtsam { * Ax-b \approx h(x1+dx1,x2+dx2,x3+dx3)-z = h(x1,x2,x3) + A2*dx1 + A2*dx2 + A3*dx3 - z * Hence b = z - h(x1,x2,x3) = - error_vector(x) */ - boost::shared_ptr linearize(const Config& c, const Ordering& ordering) const { + boost::shared_ptr linearize(const Values& c, const Ordering& ordering) const { const X1& x1 = c[key1_]; const X2& x2 = c[key2_]; const X3& x3 = c[key3_]; diff --git a/nonlinear/NonlinearFactorGraph-inl.h b/nonlinear/NonlinearFactorGraph-inl.h index 4b2a554a1..572d6983f 100644 --- a/nonlinear/NonlinearFactorGraph-inl.h +++ b/nonlinear/NonlinearFactorGraph-inl.h @@ -23,14 +23,14 @@ using namespace std; namespace gtsam { /* ************************************************************************* */ -template -void NonlinearFactorGraph::print(const std::string& str) const { +template +void NonlinearFactorGraph::print(const std::string& str) const { Base::print(str); } /* ************************************************************************* */ - template - Vector NonlinearFactorGraph::unwhitenedError(const Config& c) const 
{ + template + Vector NonlinearFactorGraph::unwhitenedError(const Values& c) const { list errors; BOOST_FOREACH(const sharedFactor& factor, this->factors_) errors.push_back(factor->unwhitenedError(c)); @@ -38,8 +38,8 @@ void NonlinearFactorGraph::print(const std::string& str) const { } /* ************************************************************************* */ - template - double NonlinearFactorGraph::error(const Config& c) const { + template + double NonlinearFactorGraph::error(const Values& c) const { double total_error = 0.; // iterate over all the factors_ to accumulate the log probabilities BOOST_FOREACH(const sharedFactor& factor, this->factors_) @@ -48,9 +48,9 @@ void NonlinearFactorGraph::print(const std::string& str) const { } /* ************************************************************************* */ - template + template pair::shared_ptr> - NonlinearFactorGraph::orderingCOLAMD(const Config& config) const { + NonlinearFactorGraph::orderingCOLAMD(const Values& config) const { // Create symbolic graph and initial (iterator) ordering FactorGraph::shared_ptr symbolic; @@ -76,9 +76,9 @@ void NonlinearFactorGraph::print(const std::string& str) const { } /* ************************************************************************* */ - template - SymbolicFactorGraph::shared_ptr NonlinearFactorGraph::symbolic( - const Config& config, const Ordering& ordering) const { + template + SymbolicFactorGraph::shared_ptr NonlinearFactorGraph::symbolic( + const Values& config, const Ordering& ordering) const { // Generate the symbolic factor graph SymbolicFactorGraph::shared_ptr symbolicfg(new FactorGraph); symbolicfg->reserve(this->size()); @@ -89,18 +89,18 @@ void NonlinearFactorGraph::print(const std::string& str) const { } /* ************************************************************************* */ - template + template pair - NonlinearFactorGraph::symbolic(const Config& config) const { + NonlinearFactorGraph::symbolic(const Values& config) const { // Generate 
an initial key ordering in iterator order Ordering::shared_ptr ordering(config.orderingArbitrary()); return make_pair(symbolic(config, *ordering), ordering); } /* ************************************************************************* */ - template - boost::shared_ptr NonlinearFactorGraph::linearize( - const Config& config, const Ordering& ordering) const { + template + boost::shared_ptr NonlinearFactorGraph::linearize( + const Values& config, const Ordering& ordering) const { // create an empty linear FG GaussianFactorGraph::shared_ptr linearFG(new GaussianFactorGraph); diff --git a/nonlinear/NonlinearFactorGraph.h b/nonlinear/NonlinearFactorGraph.h index cb3ed45d2..9984c1b96 100644 --- a/nonlinear/NonlinearFactorGraph.h +++ b/nonlinear/NonlinearFactorGraph.h @@ -20,32 +20,32 @@ namespace gtsam { /** - * A non-linear factor graph is templated on a configuration, but the factor type - * is fixed as a NonlinearFactor. The configurations are typically (in SAM) more general + * A non-linear factor graph is templated on a values structure, but the factor type + * is fixed as a NonlinearFactor. The values structures are typically (in SAM) more general * than just vectors, e.g., Rot3 or Pose3, which are objects in non-linear manifolds. * Linearizing the non-linear factor graph creates a linear factor graph on the * tangent vector space at the linearization point. Because the tangent space is a true - * vector space, the config type will be an VectorConfig in that linearized factor graph. + * vector space, the config type will be an VectorValues in that linearized factor graph. 
*/ - template - class NonlinearFactorGraph: public FactorGraph > { + template + class NonlinearFactorGraph: public FactorGraph > { public: - typedef FactorGraph > Base; - typedef typename boost::shared_ptr > sharedFactor; + typedef FactorGraph > Base; + typedef typename boost::shared_ptr > sharedFactor; /** print just calls base class */ void print(const std::string& str = "NonlinearFactorGraph: ") const; /** unnormalized error */ - double error(const Config& c) const; + double error(const Values& c) const; /** all individual errors */ - Vector unwhitenedError(const Config& c) const; + Vector unwhitenedError(const Values& c) const; /** Unnormalized probability. O(n) */ - double probPrime(const Config& c) const { + double probPrime(const Values& c) const { return exp(-0.5 * error(c)); } @@ -57,7 +57,7 @@ namespace gtsam { /** * Create a symbolic factor graph using an existing ordering */ - SymbolicFactorGraph::shared_ptr symbolic(const Config& config, const Ordering& ordering) const; + SymbolicFactorGraph::shared_ptr symbolic(const Values& config, const Ordering& ordering) const; /** * Create a symbolic factor graph and initial variable ordering that can @@ -66,7 +66,7 @@ namespace gtsam { * ordering is found. */ std::pair - symbolic(const Config& config) const; + symbolic(const Values& config) const; /** * Compute a fill-reducing ordering using COLAMD. This returns the @@ -74,13 +74,13 @@ namespace gtsam { * computation. 
*/ std::pair::shared_ptr> - orderingCOLAMD(const Config& config) const; + orderingCOLAMD(const Values& config) const; /** * linearize a nonlinear factor graph */ boost::shared_ptr - linearize(const Config& config, const Ordering& ordering) const; + linearize(const Values& config, const Ordering& ordering) const; }; diff --git a/nonlinear/NonlinearOptimizer-inl.h b/nonlinear/NonlinearOptimizer-inl.h index 08b2d8b50..7bc60bef9 100644 --- a/nonlinear/NonlinearOptimizer-inl.h +++ b/nonlinear/NonlinearOptimizer-inl.h @@ -63,7 +63,7 @@ namespace gtsam { /* ************************************************************************* */ template NonlinearOptimizer::NonlinearOptimizer(shared_graph graph, - shared_config config, shared_solver solver, double lambda) : + shared_values config, shared_solver solver, double lambda) : graph_(graph), config_(config), lambda_(lambda), solver_(solver) { if (!graph) throw std::invalid_argument( "NonlinearOptimizer constructor: graph = NULL"); @@ -78,7 +78,7 @@ namespace gtsam { // linearize and optimize /* ************************************************************************* */ template - VectorConfig NonlinearOptimizer::linearizeAndOptimizeForDelta() const { + VectorValues NonlinearOptimizer::linearizeAndOptimizeForDelta() const { boost::shared_ptr linearized = solver_->linearize(*graph_, *config_); NonlinearOptimizer prepared(graph_, config_, solver_->prepareLinear(*linearized), error_, lambda_); return prepared.solver_->optimize(*linearized); @@ -91,20 +91,20 @@ namespace gtsam { NonlinearOptimizer NonlinearOptimizer::iterate( verbosityLevel verbosity) const { // linearize and optimize - VectorConfig delta = linearizeAndOptimizeForDelta(); + VectorValues delta = linearizeAndOptimizeForDelta(); // maybe show output if (verbosity >= DELTA) delta.print("delta"); // take old config and update it - shared_config newConfig(new C(solver_->expmap(*config_, delta))); + shared_values newValues(new C(solver_->expmap(*config_, delta))); // 
maybe show output if (verbosity >= CONFIG) - newConfig->print("newConfig"); + newValues->print("newValues"); - NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, newConfig, solver_, lambda_); + NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, newValues, solver_, lambda_); if (verbosity >= ERROR) cout << "error: " << newOptimizer.error_ << endl; @@ -166,17 +166,17 @@ namespace gtsam { damped.print("damped"); // solve - VectorConfig delta = solver_->optimize(damped); + VectorValues delta = solver_->optimize(damped); if (verbosity >= TRYDELTA) delta.print("delta"); // update config - shared_config newConfig(new C(solver_->expmap(*config_, delta))); // TODO: updateConfig + shared_values newValues(new C(solver_->expmap(*config_, delta))); // TODO: updateValues // if (verbosity >= TRYCONFIG) -// newConfig->print("config"); +// newValues->print("config"); // create new optimization state with more adventurous lambda - NonlinearOptimizer next(graph_, newConfig, solver_, lambda_ / factor); + NonlinearOptimizer next(graph_, newValues, solver_, lambda_ / factor); if (verbosity >= TRYLAMBDA) cout << "next error = " << next.error_ << endl; if(lambdaMode >= CAUTIOUS) { @@ -188,7 +188,7 @@ namespace gtsam { // If we're cautious, see if the current lambda is better // todo: include stopping criterion here? if(lambdaMode == CAUTIOUS) { - NonlinearOptimizer sameLambda(graph_, newConfig, solver_, lambda_); + NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_); if(sameLambda.error_ <= next.error_) return sameLambda; } @@ -201,7 +201,7 @@ namespace gtsam { // A more adventerous lambda was worse. If we're cautious, try the same lambda. if(lambdaMode == CAUTIOUS) { - NonlinearOptimizer sameLambda(graph_, newConfig, solver_, lambda_); + NonlinearOptimizer sameLambda(graph_, newValues, solver_, lambda_); if(sameLambda.error_ <= error_) return sameLambda; } @@ -212,7 +212,7 @@ namespace gtsam { // TODO: can we avoid copying the config ? 
if(lambdaMode >= BOUNDED && lambda_ >= 1.0e5) { - return NonlinearOptimizer(graph_, newConfig, solver_, lambda_);; + return NonlinearOptimizer(graph_, newValues, solver_, lambda_);; } else { NonlinearOptimizer cautious(graph_, config_, solver_, lambda_ * factor); return cautious.try_lambda(linear, verbosity, factor, lambdaMode); diff --git a/nonlinear/NonlinearOptimizer.h b/nonlinear/NonlinearOptimizer.h index 8386d2d37..2a8ef86c0 100644 --- a/nonlinear/NonlinearOptimizer.h +++ b/nonlinear/NonlinearOptimizer.h @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include @@ -29,13 +29,13 @@ namespace gtsam { * and then one of the optimization routines is called. These recursively iterate * until convergence. All methods are functional and return a new state. * - * The class is parameterized by the Graph type $G$, Config class type $T$, + * The class is parameterized by the Graph type $G$, Values class type $T$, * linear system class $L$ and the non linear solver type $S$. - * the config type is in order to be able to optimize over non-vector configurations. + * the config type is in order to be able to optimize over non-vector values structures. * To use in code, include in your cpp file * - * For example, in a 2D case, $G$ can be Pose2Graph, $T$ can be Pose2Config, - * $L$ can be GaussianFactorGraph and $S$ can be Factorization. + * For example, in a 2D case, $G$ can be Pose2Graph, $T$ can be Pose2Values, + * $L$ can be GaussianFactorGraph and $S$ can be Factorization. * The solver class has two main functions: linearize and optimize. The first one * linearizes the nonlinear cost function around the current estimate, and the second * one optimizes the linearized system using various methods. 
@@ -45,7 +45,7 @@ namespace gtsam { public: // For performance reasons in recursion, we store configs in a shared_ptr - typedef boost::shared_ptr shared_config; + typedef boost::shared_ptr shared_values; typedef boost::shared_ptr shared_graph; typedef boost::shared_ptr shared_solver; typedef const S solver; @@ -99,9 +99,9 @@ namespace gtsam { // These normally do not change const shared_graph graph_; - // keep a configuration and its error + // keep a values structure and its error // These typically change once per iteration (in a functional way) - const shared_config config_; + const shared_values config_; double error_; // TODO FD: no more const because in constructor I need to set it after checking :-( // keep current lambda for use within LM only @@ -120,13 +120,13 @@ namespace gtsam { /** * Constructor that evaluates new error */ - NonlinearOptimizer(shared_graph graph, shared_config config, shared_solver solver, + NonlinearOptimizer(shared_graph graph, shared_values config, shared_solver solver, const double lambda = 1e-5); /** * Constructor that does not do any computation */ - NonlinearOptimizer(shared_graph graph, shared_config config, shared_solver solver, + NonlinearOptimizer(shared_graph graph, shared_values config, shared_solver solver, const double error, const double lambda): graph_(graph), config_(config), error_(error), lambda_(lambda), solver_(solver) {} @@ -154,15 +154,15 @@ namespace gtsam { /** * Return the config */ - shared_config config() const{ + shared_values config() const{ return config_; } /** * linearize and optimize - * This returns an VectorConfig, i.e., vectors in tangent space of T + * This returns an VectorValues, i.e., vectors in tangent space of T */ - VectorConfig linearizeAndOptimizeForDelta() const; + VectorValues linearizeAndOptimizeForDelta() const; /** * Do one Gauss-Newton iteration and return next state @@ -213,9 +213,9 @@ namespace gtsam { * @param graph Nonlinear factor graph to optimize * @param config Initial config 
* @param verbosity Integer specifying how much output to provide - * @return an optimized configuration + * @return an optimized values structure */ - static shared_config optimizeLM(shared_graph graph, shared_config config, + static shared_values optimizeLM(shared_graph graph, shared_values config, verbosityLevel verbosity = SILENT) { // Use a variable ordering from COLAMD @@ -238,7 +238,7 @@ namespace gtsam { /** * Static interface to LM optimization (no shared_ptr arguments) - see above */ - inline static shared_config optimizeLM(const G& graph, const T& config, + inline static shared_values optimizeLM(const G& graph, const T& config, verbosityLevel verbosity = SILENT) { return optimizeLM(boost::make_shared(graph), boost::make_shared(config), verbosity); @@ -249,9 +249,9 @@ namespace gtsam { * @param graph Nonlinear factor graph to optimize * @param config Initial config * @param verbosity Integer specifying how much output to provide - * @return an optimized configuration + * @return an optimized values structure */ - static shared_config optimizeGN(shared_graph graph, shared_config config, + static shared_values optimizeGN(shared_graph graph, shared_values config, verbosityLevel verbosity = SILENT) { Ordering::shared_ptr ordering; GaussianVariableIndex<>::shared_ptr variableIndex; @@ -271,7 +271,7 @@ namespace gtsam { /** * Static interface to GN optimization (no shared_ptr arguments) - see above */ - inline static shared_config optimizeGN(const G& graph, const T& config, + inline static shared_values optimizeGN(const G& graph, const T& config, verbosityLevel verbosity = SILENT) { return optimizeGN(boost::make_shared(graph), boost::make_shared(config), verbosity); diff --git a/nonlinear/TupleValues-inl.h b/nonlinear/TupleValues-inl.h index 8eea3d2e7..884c5ab1e 100644 --- a/nonlinear/TupleValues-inl.h +++ b/nonlinear/TupleValues-inl.h @@ -11,23 +11,23 @@ #include // TupleValues instantiations for N = 1-6 -#define INSTANTIATE_TUPLE_CONFIG1(Config1) \ - template 
class TupleValues1; +#define INSTANTIATE_TUPLE_CONFIG1(Values1) \ + template class TupleValues1; -#define INSTANTIATE_TUPLE_CONFIG2(Config1, Config2) \ - template class TupleValues2; +#define INSTANTIATE_TUPLE_CONFIG2(Values1, Values2) \ + template class TupleValues2; -#define INSTANTIATE_TUPLE_CONFIG3(Config1, Config2, Config3) \ - template class TupleValues3; +#define INSTANTIATE_TUPLE_CONFIG3(Values1, Values2, Values3) \ + template class TupleValues3; -#define INSTANTIATE_TUPLE_CONFIG4(Config1, Config2, Config3, Config4) \ - template class TupleValues4; +#define INSTANTIATE_TUPLE_CONFIG4(Values1, Values2, Values3, Values4) \ + template class TupleValues4; -#define INSTANTIATE_TUPLE_CONFIG5(Config1, Config2, Config3, Config4, Config5) \ - template class TupleValues5; +#define INSTANTIATE_TUPLE_CONFIG5(Values1, Values2, Values3, Values4, Values5) \ + template class TupleValues5; -#define INSTANTIATE_TUPLE_CONFIG6(Config1, Config2, Config3, Config4, Config5, Config6) \ - template class TupleValues6; +#define INSTANTIATE_TUPLE_CONFIG6(Values1, Values2, Values3, Values4, Values5, Values6) \ + template class TupleValues6; namespace gtsam { @@ -38,140 +38,140 @@ namespace gtsam { /* ************************************************************************* */ /** TupleValues 1 */ -template -TupleValues1::TupleValues1(const TupleValues1& config) : - TupleValuesEnd (config) {} +template +TupleValues1::TupleValues1(const TupleValues1& config) : + TupleValuesEnd (config) {} -template -TupleValues1::TupleValues1(const Config1& cfg1) : - TupleValuesEnd (cfg1) {} +template +TupleValues1::TupleValues1(const Values1& cfg1) : + TupleValuesEnd (cfg1) {} -template -TupleValues1::TupleValues1(const TupleValuesEnd& config) : - TupleValuesEnd(config) {} +template +TupleValues1::TupleValues1(const TupleValuesEnd& config) : + TupleValuesEnd(config) {} /* ************************************************************************* */ /** TupleValues 2 */ -template 
-TupleValues2::TupleValues2(const TupleValues2& config) : - TupleValues >(config) {} +template +TupleValues2::TupleValues2(const TupleValues2& config) : + TupleValues >(config) {} -template -TupleValues2::TupleValues2(const Config1& cfg1, const Config2& cfg2) : - TupleValues >( - cfg1, TupleValuesEnd(cfg2)) {} +template +TupleValues2::TupleValues2(const Values1& cfg1, const Values2& cfg2) : + TupleValues >( + cfg1, TupleValuesEnd(cfg2)) {} -template -TupleValues2::TupleValues2(const TupleValues >& config) : - TupleValues >(config) {} +template +TupleValues2::TupleValues2(const TupleValues >& config) : + TupleValues >(config) {} /* ************************************************************************* */ /** TupleValues 3 */ -template -TupleValues3::TupleValues3( - const TupleValues3& config) : - TupleValues > >(config) {} +template +TupleValues3::TupleValues3( + const TupleValues3& config) : + TupleValues > >(config) {} -template -TupleValues3::TupleValues3( - const Config1& cfg1, const Config2& cfg2, const Config3& cfg3) : - TupleValues > >( - cfg1, TupleValues >( - cfg2, TupleValuesEnd(cfg3))) {} +template +TupleValues3::TupleValues3( + const Values1& cfg1, const Values2& cfg2, const Values3& cfg3) : + TupleValues > >( + cfg1, TupleValues >( + cfg2, TupleValuesEnd(cfg3))) {} -template -TupleValues3::TupleValues3( - const TupleValues > >& config) : - TupleValues > >(config) {} +template +TupleValues3::TupleValues3( + const TupleValues > >& config) : + TupleValues > >(config) {} /* ************************************************************************* */ /** TupleValues 4 */ -template -TupleValues4::TupleValues4( - const TupleValues4& config) : - TupleValues > > >(config) {} +template +TupleValues4::TupleValues4( + const TupleValues4& config) : + TupleValues > > >(config) {} -template -TupleValues4::TupleValues4( - const Config1& cfg1, const Config2& cfg2, - const Config3& cfg3,const Config4& cfg4) : - TupleValues > > >( - cfg1, TupleValues > >( - cfg2, 
TupleValues >( - cfg3, TupleValuesEnd(cfg4)))) {} +template +TupleValues4::TupleValues4( + const Values1& cfg1, const Values2& cfg2, + const Values3& cfg3,const Values4& cfg4) : + TupleValues > > >( + cfg1, TupleValues > >( + cfg2, TupleValues >( + cfg3, TupleValuesEnd(cfg4)))) {} -template -TupleValues4::TupleValues4( - const TupleValues > > >& config) : - TupleValues > > >(config) {} +template +TupleValues4::TupleValues4( + const TupleValues > > >& config) : + TupleValues > > >(config) {} /* ************************************************************************* */ /** TupleValues 5 */ -template -TupleValues5::TupleValues5( - const TupleValues5& config) : - TupleValues > > > >(config) {} +template +TupleValues5::TupleValues5( + const TupleValues5& config) : + TupleValues > > > >(config) {} -template -TupleValues5::TupleValues5( - const Config1& cfg1, const Config2& cfg2, const Config3& cfg3, - const Config4& cfg4, const Config5& cfg5) : - TupleValues > > > >( - cfg1, TupleValues > > >( - cfg2, TupleValues > >( - cfg3, TupleValues >( - cfg4, TupleValuesEnd(cfg5))))) {} +template +TupleValues5::TupleValues5( + const Values1& cfg1, const Values2& cfg2, const Values3& cfg3, + const Values4& cfg4, const Values5& cfg5) : + TupleValues > > > >( + cfg1, TupleValues > > >( + cfg2, TupleValues > >( + cfg3, TupleValues >( + cfg4, TupleValuesEnd(cfg5))))) {} -template -TupleValues5::TupleValues5( - const TupleValues > > > >& config) : - TupleValues > > > >(config) {} +template +TupleValues5::TupleValues5( + const TupleValues > > > >& config) : + TupleValues > > > >(config) {} /* ************************************************************************* */ /** TupleValues 6 */ -template -TupleValues6::TupleValues6( - const TupleValues6& config) : - TupleValues > > > > >(config) {} +template +TupleValues6::TupleValues6( + const TupleValues6& config) : + TupleValues > > > > >(config) {} -template -TupleValues6::TupleValues6( - const Config1& cfg1, const Config2& cfg2, const 
Config3& cfg3, - const Config4& cfg4, const Config5& cfg5, const Config6& cfg6) : - TupleValues > > > > >( - cfg1, TupleValues > > > >( - cfg2, TupleValues > > >( - cfg3, TupleValues > >( - cfg4, TupleValues >( - cfg5, TupleValuesEnd(cfg6)))))) {} +template +TupleValues6::TupleValues6( + const Values1& cfg1, const Values2& cfg2, const Values3& cfg3, + const Values4& cfg4, const Values5& cfg5, const Values6& cfg6) : + TupleValues > > > > >( + cfg1, TupleValues > > > >( + cfg2, TupleValues > > >( + cfg3, TupleValues > >( + cfg4, TupleValues >( + cfg5, TupleValuesEnd(cfg6)))))) {} -template -TupleValues6::TupleValues6( - const TupleValues > > > > >& config) : - TupleValues > > > > >(config) {} +template +TupleValues6::TupleValues6( + const TupleValues > > > > >& config) : + TupleValues > > > > >(config) {} } diff --git a/nonlinear/TupleValues.h b/nonlinear/TupleValues.h index 158f00faf..34250fbe8 100644 --- a/nonlinear/TupleValues.h +++ b/nonlinear/TupleValues.h @@ -6,7 +6,7 @@ */ #include -#include +#include #pragma once @@ -27,7 +27,7 @@ namespace gtsam { * For an easy interface, there are TupleValuesN classes, which wrap * the recursive TupleValuess together as a single class, so you can have * mixed-type classes from 2-6 types. Note that a TupleValues2 is equivalent - * to the previously used PairConfig. + * to the previously used PairValues. * * Design and extension note: * To implement a recursively templated data structure, note that most operations @@ -35,28 +35,28 @@ namespace gtsam { * for the arguments to be passed to the next config, while the specialized one * operates on the "first" config. TupleValuesEnd contains only the specialized version. 
*/ - template - class TupleValues : public Testable > { + template + class TupleValues : public Testable > { protected: // Data for internal configs - Config1 first_; /// Arbitrary config - Config2 second_; /// A TupleValues or TupleValuesEnd, which wraps an arbitrary config + Values1 first_; /// Arbitrary config + Values2 second_; /// A TupleValues or TupleValuesEnd, which wraps an arbitrary config public: // typedefs for config subtypes - typedef class Config1::Key Key1; - typedef class Config1::Value Value1; + typedef class Values1::Key Key1; + typedef class Values1::Value Value1; /** default constructor */ TupleValues() {} /** Copy constructor */ - TupleValues(const TupleValues& config) : + TupleValues(const TupleValues& config) : first_(config.first_), second_(config.second_) {} /** Construct from configs */ - TupleValues(const Config1& cfg1, const Config2& cfg2) : + TupleValues(const Values1& cfg1, const Values2& cfg2) : first_(cfg1), second_(cfg2) {} /** Print */ @@ -66,7 +66,7 @@ namespace gtsam { } /** Equality with tolerance for keys and values */ - bool equals(const TupleValues& c, double tol=1e-9) const { + bool equals(const TupleValues& c, double tol=1e-9) const { return first_.equals(c.first_, tol) && second_.equals(c.second_, tol); } @@ -90,7 +90,7 @@ namespace gtsam { */ template void insert(const TupleValues& config) { second_.insert(config); } - void insert(const TupleValues& config) { + void insert(const TupleValues& config) { first_.insert(config.first_); second_.insert(config.second_); } @@ -101,7 +101,7 @@ namespace gtsam { */ template void update(const TupleValues& config) { second_.update(config); } - void update(const TupleValues& config) { + void update(const TupleValues& config) { first_.update(config.first_); second_.update(config.second_); } @@ -121,7 +121,7 @@ namespace gtsam { */ template void insertSub(const Cfg& config) { second_.insertSub(config); } - void insertSub(const Config1& config) { first_.insert(config); } + void 
insertSub(const Values1& config) { first_.insert(config); } /** erase an element by key */ template @@ -152,12 +152,12 @@ namespace gtsam { const Value1& at(const Key1& j) const { return first_.at(j); } /** direct config access */ - const Config1& config() const { return first_; } - const Config2& rest() const { return second_; } + const Values1& config() const { return first_; } + const Values2& rest() const { return second_; } - /** zero: create VectorConfig of appropriate structure */ - VectorConfig zero(const Ordering& ordering) const { - VectorConfig z(this->dims(ordering)); + /** zero: create VectorValues of appropriate structure */ + VectorValues zero(const Ordering& ordering) const { + VectorValues z(this->dims(ordering)); z.makeZero(); return z; } @@ -173,7 +173,7 @@ namespace gtsam { /** Create an array of variable dimensions using the given ordering */ std::vector dims(const Ordering& ordering) const { - _ConfigDimensionCollector dimCollector(ordering); + _ValuesDimensionCollector dimCollector(ordering); this->apply(dimCollector); return dimCollector.dimensions; } @@ -186,25 +186,25 @@ namespace gtsam { */ Ordering::shared_ptr orderingArbitrary(varid_t firstVar = 0) const { // Generate an initial key ordering in iterator order - _ConfigKeyOrderer keyOrderer(firstVar); + _ValuesKeyOrderer keyOrderer(firstVar); this->apply(keyOrderer); return keyOrderer.ordering; } /** Expmap */ - TupleValues expmap(const VectorConfig& delta, const Ordering& ordering) const { + TupleValues expmap(const VectorValues& delta, const Ordering& ordering) const { return TupleValues(first_.expmap(delta, ordering), second_.expmap(delta, ordering)); } /** logmap each element */ - VectorConfig logmap(const TupleValues& cp, const Ordering& ordering) const { - VectorConfig delta(this->dims(ordering)); + VectorValues logmap(const TupleValues& cp, const Ordering& ordering) const { + VectorValues delta(this->dims(ordering)); logmap(cp, ordering, delta); return delta; } /** logmap each 
element */ - void logmap(const TupleValues& cp, const Ordering& ordering, VectorConfig& delta) const { + void logmap(const TupleValues& cp, const Ordering& ordering, VectorValues& delta) const { first_.logmap(cp.first_, ordering, delta); second_.logmap(cp.second_, ordering, delta); } @@ -212,7 +212,7 @@ namespace gtsam { /** * Apply a class with an application operator() to a const_iterator over * every pair. The operator must be able to handle such an - * iterator for every type in the Config, (i.e. through templating). + * iterator for every type in the Values, (i.e. through templating). */ template void apply(A& operation) { @@ -242,48 +242,48 @@ namespace gtsam { * Do not use this class directly - it should only be used as a part * of a recursive structure */ - template - class TupleValuesEnd : public Testable > { + template + class TupleValuesEnd : public Testable > { protected: // Data for internal configs - Config first_; + Values first_; public: // typedefs - typedef class Config::Key Key1; - typedef class Config::Value Value1; + typedef class Values::Key Key1; + typedef class Values::Value Value1; TupleValuesEnd() {} - TupleValuesEnd(const TupleValuesEnd& config) : + TupleValuesEnd(const TupleValuesEnd& config) : first_(config.first_) {} - TupleValuesEnd(const Config& cfg) : + TupleValuesEnd(const Values& cfg) : first_(cfg) {} void print(const std::string& s = "") const { first_.print(); } - bool equals(const TupleValuesEnd& c, double tol=1e-9) const { + bool equals(const TupleValuesEnd& c, double tol=1e-9) const { return first_.equals(c.first_, tol); } void insert(const Key1& key, const Value1& value) {first_.insert(key, value); } void insert(int key, const Value1& value) {first_.insert(Key1(key), value);} - void insert(const TupleValuesEnd& config) {first_.insert(config.first_); } + void insert(const TupleValuesEnd& config) {first_.insert(config.first_); } - void update(const TupleValuesEnd& config) {first_.update(config.first_); } + void update(const 
TupleValuesEnd& config) {first_.update(config.first_); } void update(const Key1& key, const Value1& value) { first_.update(key, value); } - void insertSub(const Config& config) {first_.insert(config); } + void insertSub(const Values& config) {first_.insert(config); } const Value1& operator[](const Key1& j) const { return first_[j]; } - const Config& config() const { return first_; } + const Values& config() const { return first_; } void erase(const Key1& j) { first_.erase(j); } @@ -297,8 +297,8 @@ namespace gtsam { const Value1& at(const Key1& j) const { return first_.at(j); } - VectorConfig zero(const Ordering& ordering) const { - VectorConfig z(this->dims(ordering)); + VectorValues zero(const Ordering& ordering) const { + VectorValues z(this->dims(ordering)); z.makeZero(); return z; } @@ -307,24 +307,24 @@ namespace gtsam { size_t dim() const { return first_.dim(); } - TupleValuesEnd expmap(const VectorConfig& delta, const Ordering& ordering) const { + TupleValuesEnd expmap(const VectorValues& delta, const Ordering& ordering) const { return TupleValuesEnd(first_.expmap(delta, ordering)); } - VectorConfig logmap(const TupleValuesEnd& cp, const Ordering& ordering) const { - VectorConfig delta(this->dims(ordering)); + VectorValues logmap(const TupleValuesEnd& cp, const Ordering& ordering) const { + VectorValues delta(this->dims(ordering)); logmap(cp, ordering, delta); return delta; } - void logmap(const TupleValuesEnd& cp, const Ordering& ordering, VectorConfig& delta) const { + void logmap(const TupleValuesEnd& cp, const Ordering& ordering, VectorValues& delta) const { first_.logmap(cp.first_, ordering, delta); } /** * Apply a class with an application operator() to a const_iterator over * every pair. The operator must be able to handle such an - * iterator for every type in the Config, (i.e. through templating). + * iterator for every type in the Values, (i.e. through templating). 
*/ template void apply(A& operation) { @@ -348,14 +348,14 @@ namespace gtsam { * recursively, as they are TupleValuess, and are primarily a short form of the config * structure to make use of the TupleValuess easier. * - * The interface is designed to mimic PairConfig, but for 2-6 config types. + * The interface is designed to mimic PairValues, but for 2-6 config types. */ template class TupleValues1 : public TupleValuesEnd { public: // typedefs - typedef C1 Config1; + typedef C1 Values1; typedef TupleValuesEnd Base; typedef TupleValues1 This; @@ -363,18 +363,18 @@ namespace gtsam { TupleValues1() {} TupleValues1(const This& config); TupleValues1(const Base& config); - TupleValues1(const Config1& cfg1); + TupleValues1(const Values1& cfg1); // access functions - inline const Config1& first() const { return this->config(); } + inline const Values1& first() const { return this->config(); } }; template class TupleValues2 : public TupleValues > { public: // typedefs - typedef C1 Config1; - typedef C2 Config2; + typedef C1 Values1; + typedef C2 Values2; typedef TupleValues > Base; typedef TupleValues2 This; @@ -382,40 +382,40 @@ namespace gtsam { TupleValues2() {} TupleValues2(const This& config); TupleValues2(const Base& config); - TupleValues2(const Config1& cfg1, const Config2& cfg2); + TupleValues2(const Values1& cfg1, const Values2& cfg2); // access functions - inline const Config1& first() const { return this->config(); } - inline const Config2& second() const { return this->rest().config(); } + inline const Values1& first() const { return this->config(); } + inline const Values2& second() const { return this->rest().config(); } }; template class TupleValues3 : public TupleValues > > { public: // typedefs - typedef C1 Config1; - typedef C2 Config2; - typedef C3 Config3; + typedef C1 Values1; + typedef C2 Values2; + typedef C3 Values3; TupleValues3() {} TupleValues3(const TupleValues > >& config); TupleValues3(const TupleValues3& config); - TupleValues3(const 
Config1& cfg1, const Config2& cfg2, const Config3& cfg3); + TupleValues3(const Values1& cfg1, const Values2& cfg2, const Values3& cfg3); // access functions - inline const Config1& first() const { return this->config(); } - inline const Config2& second() const { return this->rest().config(); } - inline const Config3& third() const { return this->rest().rest().config(); } + inline const Values1& first() const { return this->config(); } + inline const Values2& second() const { return this->rest().config(); } + inline const Values3& third() const { return this->rest().rest().config(); } }; template class TupleValues4 : public TupleValues > > > { public: // typedefs - typedef C1 Config1; - typedef C2 Config2; - typedef C3 Config3; - typedef C4 Config4; + typedef C1 Values1; + typedef C2 Values2; + typedef C3 Values3; + typedef C4 Values4; typedef TupleValues > > > Base; typedef TupleValues4 This; @@ -423,62 +423,62 @@ namespace gtsam { TupleValues4() {} TupleValues4(const This& config); TupleValues4(const Base& config); - TupleValues4(const Config1& cfg1, const Config2& cfg2, const Config3& cfg3,const Config4& cfg4); + TupleValues4(const Values1& cfg1, const Values2& cfg2, const Values3& cfg3,const Values4& cfg4); // access functions - inline const Config1& first() const { return this->config(); } - inline const Config2& second() const { return this->rest().config(); } - inline const Config3& third() const { return this->rest().rest().config(); } - inline const Config4& fourth() const { return this->rest().rest().rest().config(); } + inline const Values1& first() const { return this->config(); } + inline const Values2& second() const { return this->rest().config(); } + inline const Values3& third() const { return this->rest().rest().config(); } + inline const Values4& fourth() const { return this->rest().rest().rest().config(); } }; template class TupleValues5 : public TupleValues > > > > { public: // typedefs - typedef C1 Config1; - typedef C2 Config2; - typedef C3 
Config3; - typedef C4 Config4; - typedef C5 Config5; + typedef C1 Values1; + typedef C2 Values2; + typedef C3 Values3; + typedef C4 Values4; + typedef C5 Values5; TupleValues5() {} TupleValues5(const TupleValues5& config); TupleValues5(const TupleValues > > > >& config); - TupleValues5(const Config1& cfg1, const Config2& cfg2, const Config3& cfg3, - const Config4& cfg4, const Config5& cfg5); + TupleValues5(const Values1& cfg1, const Values2& cfg2, const Values3& cfg3, + const Values4& cfg4, const Values5& cfg5); // access functions - inline const Config1& first() const { return this->config(); } - inline const Config2& second() const { return this->rest().config(); } - inline const Config3& third() const { return this->rest().rest().config(); } - inline const Config4& fourth() const { return this->rest().rest().rest().config(); } - inline const Config5& fifth() const { return this->rest().rest().rest().rest().config(); } + inline const Values1& first() const { return this->config(); } + inline const Values2& second() const { return this->rest().config(); } + inline const Values3& third() const { return this->rest().rest().config(); } + inline const Values4& fourth() const { return this->rest().rest().rest().config(); } + inline const Values5& fifth() const { return this->rest().rest().rest().rest().config(); } }; template class TupleValues6 : public TupleValues > > > > > { public: // typedefs - typedef C1 Config1; - typedef C2 Config2; - typedef C3 Config3; - typedef C4 Config4; - typedef C5 Config5; - typedef C6 Config6; + typedef C1 Values1; + typedef C2 Values2; + typedef C3 Values3; + typedef C4 Values4; + typedef C5 Values5; + typedef C6 Values6; TupleValues6() {} TupleValues6(const TupleValues6& config); TupleValues6(const TupleValues > > > > >& config); - TupleValues6(const Config1& cfg1, const Config2& cfg2, const Config3& cfg3, - const Config4& cfg4, const Config5& cfg5, const Config6& cfg6); + TupleValues6(const Values1& cfg1, const Values2& cfg2, const 
Values3& cfg3, + const Values4& cfg4, const Values5& cfg5, const Values6& cfg6); // access functions - inline const Config1& first() const { return this->config(); } - inline const Config2& second() const { return this->rest().config(); } - inline const Config3& third() const { return this->rest().rest().config(); } - inline const Config4& fourth() const { return this->rest().rest().rest().config(); } - inline const Config5& fifth() const { return this->rest().rest().rest().rest().config(); } - inline const Config6& sixth() const { return this->rest().rest().rest().rest().rest().config(); } + inline const Values1& first() const { return this->config(); } + inline const Values2& second() const { return this->rest().config(); } + inline const Values3& third() const { return this->rest().rest().config(); } + inline const Values4& fourth() const { return this->rest().rest().rest().config(); } + inline const Values5& fifth() const { return this->rest().rest().rest().rest().config(); } + inline const Values6& sixth() const { return this->rest().rest().rest().rest().rest().config(); } }; } diff --git a/nonlinear/tests/testConstraintOptimizer.cpp b/nonlinear/tests/testConstraintOptimizer.cpp index a8a2b98d8..85ec3bac8 100644 --- a/nonlinear/tests/testConstraintOptimizer.cpp +++ b/nonlinear/tests/testConstraintOptimizer.cpp @@ -43,7 +43,7 @@ TEST (NonlinearConstraint, problem1_cholesky ) { Symbol x1("x1"), y1("y1"), L1("L1"); // state structure: [x y \lambda] - VectorConfig init, state; + VectorValues init, state; init.insert(x1, Vector_(1, 1.0)); init.insert(y1, Vector_(1, 1.0)); init.insert(L1, Vector_(1, 1.0)); @@ -106,18 +106,18 @@ TEST (NonlinearConstraint, problem1_cholesky ) { // solve Ordering ord; ord += x1, y1, L1; - VectorConfig delta = fg.optimize(ord).scale(-1.0); + VectorValues delta = fg.optimize(ord).scale(-1.0); if (verbose) delta.print("Delta"); // update initial estimate - VectorConfig newState = expmap(state, delta); + VectorValues newState = 
expmap(state, delta); state = newState; if (verbose) state.print("Updated State"); } // verify that it converges to the nearest optimal point - VectorConfig expected; + VectorValues expected; expected.insert(L1, Vector_(1, -1.0)); expected.insert(x1, Vector_(1, 2.12)); expected.insert(y1, Vector_(1, -0.5)); diff --git a/nonlinear/tests/testLieValues.cpp b/nonlinear/tests/testLieValues.cpp index aa68c8813..d83a2d631 100644 --- a/nonlinear/tests/testLieValues.cpp +++ b/nonlinear/tests/testLieValues.cpp @@ -17,17 +17,17 @@ using namespace std; static double inf = std::numeric_limits::infinity(); typedef TypedSymbol VecKey; -typedef LieValues Config; +typedef LieValues Values; VecKey key1(1), key2(2), key3(3), key4(4); /* ************************************************************************* */ TEST( LieValues, equals1 ) { - Config expected; + Values expected; Vector v = Vector_(3, 5.0, 6.0, 7.0); expected.insert(key1,v); - Config actual; + Values actual; actual.insert(key1,v); CHECK(assert_equal(expected,actual)); } @@ -35,7 +35,7 @@ TEST( LieValues, equals1 ) /* ************************************************************************* */ TEST( LieValues, equals2 ) { - Config cfg1, cfg2; + Values cfg1, cfg2; Vector v1 = Vector_(3, 5.0, 6.0, 7.0); Vector v2 = Vector_(3, 5.0, 6.0, 8.0); cfg1.insert(key1, v1); @@ -47,7 +47,7 @@ TEST( LieValues, equals2 ) /* ************************************************************************* */ TEST( LieValues, equals_nan ) { - Config cfg1, cfg2; + Values cfg1, cfg2; Vector v1 = Vector_(3, 5.0, 6.0, 7.0); Vector v2 = Vector_(3, inf, inf, inf); cfg1.insert(key1, v1); @@ -59,7 +59,7 @@ TEST( LieValues, equals_nan ) /* ************************************************************************* */ TEST( LieValues, insert_config ) { - Config cfg1, cfg2, expected; + Values cfg1, cfg2, expected; Vector v1 = Vector_(3, 5.0, 6.0, 7.0); Vector v2 = Vector_(3, 8.0, 9.0, 1.0); Vector v3 = Vector_(3, 2.0, 4.0, 3.0); @@ -82,7 +82,7 @@ TEST( 
LieValues, insert_config ) /* ************************************************************************* */ TEST( LieValues, update_element ) { - Config cfg; + Values cfg; Vector v1 = Vector_(3, 5.0, 6.0, 7.0); Vector v2 = Vector_(3, 8.0, 9.0, 1.0); @@ -98,12 +98,12 @@ TEST( LieValues, update_element ) ///* ************************************************************************* */ //TEST(LieValues, dim_zero) //{ -// Config config0; +// Values config0; // config0.insert(key1, Vector_(2, 2.0, 3.0)); // config0.insert(key2, Vector_(3, 5.0, 6.0, 7.0)); // LONGS_EQUAL(5,config0.dim()); // -// VectorConfig expected; +// VectorValues expected; // expected.insert(key1, zero(2)); // expected.insert(key2, zero(3)); // CHECK(assert_equal(expected, config0.zero())); @@ -112,16 +112,16 @@ TEST( LieValues, update_element ) /* ************************************************************************* */ TEST(LieValues, expmap_a) { - Config config0; + Values config0; config0.insert(key1, Vector_(3, 1.0, 2.0, 3.0)); config0.insert(key2, Vector_(3, 5.0, 6.0, 7.0)); Ordering ordering(*config0.orderingArbitrary()); - VectorConfig increment(config0.dims(ordering)); + VectorValues increment(config0.dims(ordering)); increment[ordering[key1]] = Vector_(3, 1.0, 1.1, 1.2); increment[ordering[key2]] = Vector_(3, 1.3, 1.4, 1.5); - Config expected; + Values expected; expected.insert(key1, Vector_(3, 2.0, 3.1, 4.2)); expected.insert(key2, Vector_(3, 6.3, 7.4, 8.5)); @@ -131,15 +131,15 @@ TEST(LieValues, expmap_a) ///* ************************************************************************* */ //TEST(LieValues, expmap_b) //{ -// Config config0; +// Values config0; // config0.insert(key1, Vector_(3, 1.0, 2.0, 3.0)); // config0.insert(key2, Vector_(3, 5.0, 6.0, 7.0)); // // Ordering ordering(*config0.orderingArbitrary()); -// VectorConfig increment(config0.dims(ordering)); +// VectorValues increment(config0.dims(ordering)); // increment[ordering[key2]] = Vector_(3, 1.3, 1.4, 1.5); // -// Config 
expected; +// Values expected; // expected.insert(key1, Vector_(3, 1.0, 2.0, 3.0)); // expected.insert(key2, Vector_(3, 6.3, 7.4, 8.5)); // @@ -149,7 +149,7 @@ TEST(LieValues, expmap_a) ///* ************************************************************************* */ //TEST(LieValues, expmap_c) //{ -// Config config0; +// Values config0; // config0.insert(key1, Vector_(3, 1.0, 2.0, 3.0)); // config0.insert(key2, Vector_(3, 5.0, 6.0, 7.0)); // @@ -157,7 +157,7 @@ TEST(LieValues, expmap_a) // 1.0, 1.1, 1.2, // 1.3, 1.4, 1.5); // -// Config expected; +// Values expected; // expected.insert(key1, Vector_(3, 2.0, 3.1, 4.2)); // expected.insert(key2, Vector_(3, 6.3, 7.4, 8.5)); // @@ -167,7 +167,7 @@ TEST(LieValues, expmap_a) /* ************************************************************************* */ /*TEST(LieValues, expmap_d) { - Config config0; + Values config0; config0.insert(key1, Vector_(3, 1.0, 2.0, 3.0)); config0.insert(key2, Vector_(3, 5.0, 6.0, 7.0)); //config0.print("config0"); @@ -207,7 +207,7 @@ TEST(LieValues, expmap_a) /* ************************************************************************* */ TEST(LieValues, exists_) { - Config config0; + Values config0; config0.insert(key1, Vector_(1, 1.)); config0.insert(key2, Vector_(1, 2.)); @@ -218,17 +218,17 @@ TEST(LieValues, exists_) /* ************************************************************************* */ TEST(LieValues, update) { - Config config0; + Values config0; config0.insert(key1, Vector_(1, 1.)); config0.insert(key2, Vector_(1, 2.)); - Config superset; + Values superset; superset.insert(key1, Vector_(1, -1.)); superset.insert(key2, Vector_(1, -2.)); superset.insert(key3, Vector_(1, -3.)); config0.update(superset); - Config expected; + Values expected; expected.insert(key1, Vector_(1, -1.)); expected.insert(key2, Vector_(1, -2.)); CHECK(assert_equal(expected,config0)); @@ -238,18 +238,18 @@ TEST(LieValues, update) TEST(LieValues, dummy_initialization) { typedef TypedSymbol Key; - typedef 
LieValues Config1; + typedef LieValues Values1; - Config1 init1; + Values1 init1; init1.insert(Key(1), Vector_(2, 1.0, 2.0)); init1.insert(Key(2), Vector_(2, 4.0, 3.0)); - Config init2; + Values init2; init2.insert(key1, Vector_(2, 1.0, 2.0)); init2.insert(key2, Vector_(2, 4.0, 3.0)); - Config1 actual1(init2); - Config actual2(init1); + Values1 actual1(init2); + Values actual2(init1); EXPECT(actual1.empty()); EXPECT(actual2.empty()); diff --git a/slam/BearingFactor.h b/slam/BearingFactor.h index 838789f89..20e37a38f 100644 --- a/slam/BearingFactor.h +++ b/slam/BearingFactor.h @@ -15,13 +15,13 @@ namespace gtsam { /** * Binary factor for a bearing measurement */ - template - class BearingFactor: public NonlinearFactor2 { + template + class BearingFactor: public NonlinearFactor2 { private: Rot2 z_; /** measurement */ - typedef NonlinearFactor2 Base; + typedef NonlinearFactor2 Base; public: diff --git a/slam/BearingRangeFactor.h b/slam/BearingRangeFactor.h index 64842ba93..fdf2c4f38 100644 --- a/slam/BearingRangeFactor.h +++ b/slam/BearingRangeFactor.h @@ -16,15 +16,15 @@ namespace gtsam { /** * Binary factor for a bearing measurement */ - template - class BearingRangeFactor: public NonlinearFactor2 { + template + class BearingRangeFactor: public NonlinearFactor2 { private: // the measurement Rot2 bearing_; double range_; - typedef NonlinearFactor2 Base; + typedef NonlinearFactor2 Base; public: diff --git a/slam/BetweenConstraint.h b/slam/BetweenConstraint.h index 540d7d622..9dba9240b 100644 --- a/slam/BetweenConstraint.h +++ b/slam/BetweenConstraint.h @@ -14,19 +14,19 @@ namespace gtsam { * Binary between constraint - forces between to a given value * This constraint requires the underlying type to a Lie type */ - template - class BetweenConstraint : public NonlinearEqualityConstraint2 { + template + class BetweenConstraint : public NonlinearEqualityConstraint2 { public: typedef typename Key::Value_t X; protected: - typedef NonlinearEqualityConstraint2 Base; + 
typedef NonlinearEqualityConstraint2 Base; X measured_; /// fixed between value public: - typedef boost::shared_ptr > shared_ptr; + typedef boost::shared_ptr > shared_ptr; BetweenConstraint(const X& measured, const Key& key1, const Key& key2, double mu = 1000.0) : Base(key1, key2, X::Dim(), mu), measured_(measured) {} diff --git a/slam/BetweenFactor.h b/slam/BetweenFactor.h index 03159a600..df5c99d41 100644 --- a/slam/BetweenFactor.h +++ b/slam/BetweenFactor.h @@ -15,19 +15,19 @@ namespace gtsam { /** * A class for a measurement predicted by "between(config[key1],config[key2])" - * T is the Lie group type, Config where the T's are gotten from + * T is the Lie group type, Values where the T's are gotten from * * FIXME: This should only need one key type, as we can't have different types */ - template - class BetweenFactor: public NonlinearFactor2 { + template + class BetweenFactor: public NonlinearFactor2 { public: typedef typename Key1::Value_t T; private: - typedef NonlinearFactor2 Base; + typedef NonlinearFactor2 Base; T measured_; /** The measurement */ @@ -51,9 +51,9 @@ namespace gtsam { } /** equals */ - bool equals(const NonlinearFactor& expected, double tol) const { - const BetweenFactor *e = - dynamic_cast*> (&expected); + bool equals(const NonlinearFactor& expected, double tol) const { + const BetweenFactor *e = + dynamic_cast*> (&expected); return e != NULL && Base::equals(expected, tol) && this->measured_.equals( e->measured_, tol); } diff --git a/slam/GaussianISAM2.cpp b/slam/GaussianISAM2.cpp index f60de707a..544cfd399 100644 --- a/slam/GaussianISAM2.cpp +++ b/slam/GaussianISAM2.cpp @@ -12,15 +12,15 @@ using namespace gtsam; // Explicitly instantiate so we don't have to include everywhere #include -template class ISAM2; -template class ISAM2; +template class ISAM2; +template class ISAM2; namespace gtsam { /* ************************************************************************* */ void optimize2(const GaussianISAM2::sharedClique& clique, double 
threshold, - vector& changed, const vector& replaced, Permuted& delta, int& count) { + vector& changed, const vector& replaced, Permuted& delta, int& count) { // if none of the variables in this clique (frontal and separator!) changed // significantly, then by the running intersection property, none of the // cliques in the children need to be processed @@ -60,7 +60,7 @@ void optimize2(const GaussianISAM2::sharedClique& clique, double threshold, if (!redo) { // change is measured against the previous delta! // if (delta.contains(cg->key())) { - const VectorConfig::mapped_type d_old(delta[cg->key()]); + const VectorValues::mapped_type d_old(delta[cg->key()]); assert(d_old.size() == d.size()); for(size_t i=0; i= threshold) { @@ -97,7 +97,7 @@ void optimize2(const GaussianISAM2::sharedClique& clique, double threshold, /* ************************************************************************* */ // fast full version without threshold -void optimize2(const GaussianISAM2::sharedClique& clique, VectorConfig& delta) { +void optimize2(const GaussianISAM2::sharedClique& clique, VectorValues& delta) { // parents are assumed to already be solved and available in result GaussianISAM2::Clique::const_reverse_iterator it; for (it = clique->rbegin(); it!=clique->rend(); it++) { @@ -112,8 +112,8 @@ void optimize2(const GaussianISAM2::sharedClique& clique, VectorConfig& delta) { } ///* ************************************************************************* */ -//boost::shared_ptr optimize2(const GaussianISAM2::sharedClique& root) { -// boost::shared_ptr delta(new VectorConfig()); +//boost::shared_ptr optimize2(const GaussianISAM2::sharedClique& root) { +// boost::shared_ptr delta(new VectorValues()); // set changed; // // starting from the root, call optimize on each conditional // optimize2(root, delta); @@ -121,7 +121,7 @@ void optimize2(const GaussianISAM2::sharedClique& clique, VectorConfig& delta) { //} /* 
************************************************************************* */ -int optimize2(const GaussianISAM2::sharedClique& root, double threshold, const vector& keys, Permuted& delta) { +int optimize2(const GaussianISAM2::sharedClique& root, double threshold, const vector& keys, Permuted& delta) { vector changed(keys.size(), false); int count = 0; // starting from the root, call optimize on each conditional diff --git a/slam/GaussianISAM2.h b/slam/GaussianISAM2.h index f455cf135..a2a5bc710 100644 --- a/slam/GaussianISAM2.h +++ b/slam/GaussianISAM2.h @@ -16,11 +16,11 @@ namespace gtsam { - typedef ISAM2 GaussianISAM2; - typedef ISAM2 GaussianISAM2_P; + typedef ISAM2 GaussianISAM2; + typedef ISAM2 GaussianISAM2_P; // optimize the BayesTree, starting from the root - void optimize2(const GaussianISAM2::sharedClique& root, VectorConfig& delta); + void optimize2(const GaussianISAM2::sharedClique& root, VectorValues& delta); // optimize the BayesTree, starting from the root; "replaced" needs to contain // all variables that are contained in the top of the Bayes tree that has been @@ -31,7 +31,7 @@ namespace gtsam { // variables are contained in the subtree. 
// returns the number of variables that were solved for int optimize2(const GaussianISAM2::sharedClique& root, - double threshold, const std::vector& replaced, Permuted& delta); + double threshold, const std::vector& replaced, Permuted& delta); // calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix) int calculate_nnz(const GaussianISAM2::sharedClique& clique); diff --git a/slam/LinearApproxFactor-inl.h b/slam/LinearApproxFactor-inl.h index 62e22444c..c678db376 100644 --- a/slam/LinearApproxFactor-inl.h +++ b/slam/LinearApproxFactor-inl.h @@ -14,8 +14,8 @@ namespace gtsam { /* ************************************************************************* */ -template -LinearApproxFactor::LinearApproxFactor(GaussianFactor::shared_ptr lin_factor, const Config& lin_points) +template +LinearApproxFactor::LinearApproxFactor(GaussianFactor::shared_ptr lin_factor, const Values& lin_points) : Base(noiseModel::Unit::Create(lin_factor->get_model()->dim())), lin_factor_(lin_factor), lin_points_(lin_points) { @@ -27,10 +27,10 @@ LinearApproxFactor::LinearApproxFactor(GaussianFactor::shared_ptr li } /* ************************************************************************* */ -template -Vector LinearApproxFactor::unwhitenedError(const Config& c) const { +template +Vector LinearApproxFactor::unwhitenedError(const Values& c) const { // extract the points in the new config - VectorConfig delta; + VectorValues delta; BOOST_FOREACH(const Key& key, nonlinearKeys_) { X newPt = c[key], linPt = lin_points_[key]; Vector d = linPt.logmap(newPt); @@ -41,9 +41,9 @@ Vector LinearApproxFactor::unwhitenedError(const Config& c) const { } /* ************************************************************************* */ -template +template boost::shared_ptr -LinearApproxFactor::linearize(const Config& c) const { +LinearApproxFactor::linearize(const Values& c) const { Vector b = lin_factor_->getb(); SharedDiagonal model = lin_factor_->get_model(); 
std::vector > terms; @@ -56,9 +56,9 @@ LinearApproxFactor::linearize(const Config& c) const { } /* ************************************************************************* */ -template -void LinearApproxFactor::print(const std::string& s) const { - LinearApproxFactor::Base::print(s); +template +void LinearApproxFactor::print(const std::string& s) const { + LinearApproxFactor::Base::print(s); lin_factor_->print(); } diff --git a/slam/LinearApproxFactor.h b/slam/LinearApproxFactor.h index 62418f876..32bdec7b8 100644 --- a/slam/LinearApproxFactor.h +++ b/slam/LinearApproxFactor.h @@ -8,7 +8,7 @@ #include #include -#include +#include #include namespace gtsam { @@ -17,18 +17,18 @@ namespace gtsam { * A dummy factor that takes a linearized factor and inserts it into * a nonlinear graph. This version uses exactly one type of variable. */ -template -class LinearApproxFactor : public NonlinearFactor { +template +class LinearApproxFactor : public NonlinearFactor { public: /** type for the variable */ typedef typename Key::Value_t X; /** base type */ - typedef NonlinearFactor Base; + typedef NonlinearFactor Base; /** shared pointer for convenience */ - typedef boost::shared_ptr > shared_ptr; + typedef boost::shared_ptr > shared_ptr; /** typedefs for key vectors */ typedef std::vector KeyVector; @@ -38,7 +38,7 @@ protected: GaussianFactor::shared_ptr lin_factor_; /** linearization points for error calculation */ - Config lin_points_; + Values lin_points_; /** keep keys for the factor */ KeyVector nonlinearKeys_; @@ -46,18 +46,18 @@ protected: /** * use this for derived classes with keys that don't copy easily */ - LinearApproxFactor(size_t dim, const Config& lin_points) + LinearApproxFactor(size_t dim, const Values& lin_points) : Base(noiseModel::Unit::Create(dim)), lin_points_(lin_points) {} public: /** use this constructor when starting with nonlinear keys */ - LinearApproxFactor(GaussianFactor::shared_ptr lin_factor, const Config& lin_points); + 
LinearApproxFactor(GaussianFactor::shared_ptr lin_factor, const Values& lin_points); virtual ~LinearApproxFactor() {} /** Vector of errors, unwhitened ! */ - virtual Vector unwhitenedError(const Config& c) const; + virtual Vector unwhitenedError(const Values& c) const; /** * linearize to a GaussianFactor @@ -65,7 +65,7 @@ public: * NOTE: copies to avoid actual factor getting destroyed * during elimination */ - virtual boost::shared_ptr linearize(const Config& c) const; + virtual boost::shared_ptr linearize(const Values& c) const; /** get access to nonlinear keys */ KeyVector nonlinearKeys() const { return nonlinearKeys_; } diff --git a/slam/Makefile.am b/slam/Makefile.am index 1c2607a87..34b96c9f1 100644 --- a/slam/Makefile.am +++ b/slam/Makefile.am @@ -11,14 +11,14 @@ sources = check_PROGRAMS = # simulated2D example -headers += Simulated2DConfig.h +headers += Simulated2DValues.h headers += Simulated2DPosePrior.h Simulated2DPointPrior.h headers += Simulated2DOdometry.h Simulated2DMeasurement.h sources += simulated2D.cpp smallExample.cpp check_PROGRAMS += tests/testSimulated2D # simulated2DOriented example -headers += Simulated2DOrientedConfig.h +headers += Simulated2DOrientedValues.h headers += Simulated2DOrientedPosePrior.h headers += Simulated2DOrientedOdometry.h sources += simulated2DOriented.cpp @@ -40,7 +40,7 @@ headers += LinearApproxFactor.h LinearApproxFactor-inl.h # 2D Pose SLAM sources += pose2SLAM.cpp dataset.cpp #sources += Pose2SLAMOptimizer.cpp -check_PROGRAMS += tests/testPose2Factor tests/testPose2Config tests/testPose2SLAM tests/testPose2Prior +check_PROGRAMS += tests/testPose2Factor tests/testPose2Values tests/testPose2SLAM tests/testPose2Prior # 2D SLAM using Bearing and Range headers += BearingFactor.h RangeFactor.h BearingRangeFactor.h @@ -49,11 +49,11 @@ check_PROGRAMS += tests/testPlanarSLAM # 3D Pose constraints sources += pose3SLAM.cpp -check_PROGRAMS += tests/testPose3Factor tests/testPose3Config tests/testPose3SLAM +check_PROGRAMS += 
tests/testPose3Factor tests/testPose3Values tests/testPose3SLAM # Visual SLAM sources += visualSLAM.cpp -check_PROGRAMS += tests/testVSLAMFactor tests/testVSLAMGraph tests/testVSLAMConfig +check_PROGRAMS += tests/testVSLAMFactor tests/testVSLAMGraph tests/testVSLAMValues # GaussianISAM2 is fairly SLAM-specific sources += GaussianISAM2.cpp diff --git a/slam/Pose2SLAMOptimizer.cpp b/slam/Pose2SLAMOptimizer.cpp index bad299ab9..706486b18 100644 --- a/slam/Pose2SLAMOptimizer.cpp +++ b/slam/Pose2SLAMOptimizer.cpp @@ -48,7 +48,7 @@ namespace gtsam { /* ************************************************************************* */ void Pose2SLAMOptimizer::update(const Vector& x) { - VectorConfig X = system_->assembleConfig(x, *solver_.ordering()); + VectorValues X = system_->assembleValues(x, *solver_.ordering()); *theta_ = theta_->expmap(X); linearize(); } @@ -61,7 +61,7 @@ namespace gtsam { /* ************************************************************************* */ Vector Pose2SLAMOptimizer::optimize() const { - VectorConfig X = solver_.optimize(*system_); + VectorValues X = solver_.optimize(*system_); std::list vs; BOOST_FOREACH(const Symbol& key, *solver_.ordering()) vs.push_back(X[key]); diff --git a/slam/Pose2SLAMOptimizer.h b/slam/Pose2SLAMOptimizer.h index b2f733085..8968daa4e 100644 --- a/slam/Pose2SLAMOptimizer.h +++ b/slam/Pose2SLAMOptimizer.h @@ -18,7 +18,7 @@ namespace gtsam { /** * Optimizer class for use in MATLAB - * Keeps a Pose2Config estimate + * Keeps a Pose2Values estimate * Returns all relevant matrices so MATLAB can optimize :-) */ class Pose2SLAMOptimizer { @@ -28,10 +28,10 @@ namespace gtsam { boost::shared_ptr graph_; /** Current non-linear estimate */ - boost::shared_ptr theta_; + boost::shared_ptr theta_; /** Non-linear solver */ - typedef SubgraphSolver SPCG_Solver; + typedef SubgraphSolver SPCG_Solver; SPCG_Solver solver_; /** Linear Solver */ @@ -65,7 +65,7 @@ namespace gtsam { /** * return the current linearization point */ - 
boost::shared_ptr theta() const { + boost::shared_ptr theta() const { return theta_; } diff --git a/slam/PriorFactor.h b/slam/PriorFactor.h index d6014d5de..2fa22b70e 100644 --- a/slam/PriorFactor.h +++ b/slam/PriorFactor.h @@ -15,20 +15,20 @@ namespace gtsam { * A class for a soft prior on any Lie type * It takes three template parameters: * T is the Lie group type for which the prior is define - * Key (typically TypedSymbol) is used to look up T's in a Config - * Config where the T's are stored, typically LieValues or a TupleValues<...> + * Key (typically TypedSymbol) is used to look up T's in a Values + * Values where the T's are stored, typically LieValues or a TupleValues<...> * The Key type is not arbitrary: we need to cast to a Symbol at linearize, so * a simple type like int will not work */ - template - class PriorFactor: public NonlinearFactor1 { + template + class PriorFactor: public NonlinearFactor1 { public: typedef typename Key::Value_t T; private: - typedef NonlinearFactor1 Base; + typedef NonlinearFactor1 Base; T prior_; /** The measurement */ @@ -52,9 +52,9 @@ namespace gtsam { } /** equals */ - bool equals(const NonlinearFactor& expected, double tol) const { - const PriorFactor *e = dynamic_cast*> (&expected); + bool equals(const NonlinearFactor& expected, double tol) const { + const PriorFactor *e = dynamic_cast*> (&expected); return e != NULL && Base::equals(expected, tol) && this->prior_.equals( e->prior_, tol); } diff --git a/slam/RangeFactor.h b/slam/RangeFactor.h index 57cb5372b..da8894852 100644 --- a/slam/RangeFactor.h +++ b/slam/RangeFactor.h @@ -14,13 +14,13 @@ namespace gtsam { /** * Binary factor for a range measurement */ - template - class RangeFactor: public NonlinearFactor2 { + template + class RangeFactor: public NonlinearFactor2 { private: double z_; /** measurement */ - typedef NonlinearFactor2 Base; + typedef NonlinearFactor2 Base; public: diff --git a/slam/Simulated2DMeasurement.h b/slam/Simulated2DMeasurement.h index 
65b2b8d2a..6aaeb079e 100644 --- a/slam/Simulated2DMeasurement.h +++ b/slam/Simulated2DMeasurement.h @@ -8,7 +8,7 @@ #pragma once #include -#include +#include namespace gtsam { diff --git a/slam/Simulated2DOdometry.h b/slam/Simulated2DOdometry.h index 59df8ce55..5e154f013 100644 --- a/slam/Simulated2DOdometry.h +++ b/slam/Simulated2DOdometry.h @@ -8,7 +8,7 @@ #pragma once #include -#include +#include namespace gtsam { diff --git a/slam/Simulated2DOrientedOdometry.h b/slam/Simulated2DOrientedOdometry.h index bc6d73ef7..a53851a75 100644 --- a/slam/Simulated2DOrientedOdometry.h +++ b/slam/Simulated2DOrientedOdometry.h @@ -8,7 +8,7 @@ #pragma once #include -#include +#include namespace gtsam { diff --git a/slam/Simulated2DOrientedPosePrior.h b/slam/Simulated2DOrientedPosePrior.h index ad893bdcc..391600cea 100644 --- a/slam/Simulated2DOrientedPosePrior.h +++ b/slam/Simulated2DOrientedPosePrior.h @@ -8,12 +8,12 @@ #pragma once #include -#include +#include namespace gtsam { /** Create a prior on a pose Point2 with key 'x1' etc... 
*/ - typedef simulated2DOriented::GenericPosePrior Simulated2DOrientedPosePrior; + typedef simulated2DOriented::GenericPosePrior Simulated2DOrientedPosePrior; } diff --git a/slam/Simulated2DOrientedConfig.h b/slam/Simulated2DOrientedValues.h similarity index 87% rename from slam/Simulated2DOrientedConfig.h rename to slam/Simulated2DOrientedValues.h index 7ea9268b0..a230e5f84 100644 --- a/slam/Simulated2DOrientedConfig.h +++ b/slam/Simulated2DOrientedValues.h @@ -1,5 +1,5 @@ /* - * Simulated2DConfig.h + * Simulated2DValues.h * * Re-created on Feb 22, 2010 for compatibility with MATLAB * Author: Frank Dellaert @@ -11,12 +11,12 @@ namespace gtsam { - class Simulated2DOrientedConfig: public simulated2DOriented::Config { + class Simulated2DOrientedValues: public simulated2DOriented::Values { public: typedef boost::shared_ptr sharedPoint; typedef boost::shared_ptr sharedPose; - Simulated2DOrientedConfig() { + Simulated2DOrientedValues() { } void insertPose(const simulated2DOriented::PoseKey& i, const Pose2& p) { diff --git a/slam/Simulated2DPointPrior.h b/slam/Simulated2DPointPrior.h index 82b36cbf5..f7c03e090 100644 --- a/slam/Simulated2DPointPrior.h +++ b/slam/Simulated2DPointPrior.h @@ -8,12 +8,12 @@ #pragma once #include -#include +#include namespace gtsam { /** Create a prior on a landmark Point2 with key 'l1' etc... */ - typedef simulated2D::GenericPrior Simulated2DPointPrior; + typedef simulated2D::GenericPrior Simulated2DPointPrior; } diff --git a/slam/Simulated2DPosePrior.h b/slam/Simulated2DPosePrior.h index ae01f524a..e88f44d51 100644 --- a/slam/Simulated2DPosePrior.h +++ b/slam/Simulated2DPosePrior.h @@ -8,12 +8,12 @@ #pragma once #include -#include +#include namespace gtsam { /** Create a prior on a pose Point2 with key 'x1' etc... 
*/ - typedef simulated2D::GenericPrior Simulated2DPosePrior; + typedef simulated2D::GenericPrior Simulated2DPosePrior; } diff --git a/slam/Simulated2DConfig.h b/slam/Simulated2DValues.h similarity index 88% rename from slam/Simulated2DConfig.h rename to slam/Simulated2DValues.h index e9e9d0612..388c72fb3 100644 --- a/slam/Simulated2DConfig.h +++ b/slam/Simulated2DValues.h @@ -1,5 +1,5 @@ /* - * Simulated2DConfig.h + * Simulated2DValues.h * * Re-created on Feb 22, 2010 for compatibility with MATLAB * Author: Frank Dellaert @@ -11,11 +11,11 @@ namespace gtsam { - class Simulated2DConfig: public simulated2D::Config { + class Simulated2DValues: public simulated2D::Values { public: typedef boost::shared_ptr sharedPoint; - Simulated2DConfig() { + Simulated2DValues() { } void insertPose(const simulated2D::PoseKey& i, const Point2& p) { diff --git a/slam/Simulated3D.cpp b/slam/Simulated3D.cpp index 5be9a49ff..9041e0645 100644 --- a/slam/Simulated3D.cpp +++ b/slam/Simulated3D.cpp @@ -13,7 +13,7 @@ namespace gtsam { using namespace simulated3D; INSTANTIATE_LIE_CONFIG(PointKey) INSTANTIATE_LIE_CONFIG(PoseKey) -INSTANTIATE_TUPLE_CONFIG2(PoseConfig, PointConfig) +INSTANTIATE_TUPLE_CONFIG2(PoseValues, PointValues) namespace simulated3D { diff --git a/slam/Simulated3D.h b/slam/Simulated3D.h index 9162d8803..615523a86 100644 --- a/slam/Simulated3D.h +++ b/slam/Simulated3D.h @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include @@ -23,9 +23,9 @@ namespace simulated3D { typedef gtsam::TypedSymbol PoseKey; typedef gtsam::TypedSymbol PointKey; - typedef LieValues PoseConfig; - typedef LieValues PointConfig; - typedef TupleValues2 Config; + typedef LieValues PoseValues; + typedef LieValues PointValues; + typedef TupleValues2 Values; /** * Prior on a single pose @@ -46,13 +46,13 @@ namespace simulated3D { boost::optional H1 = boost::none, boost::optional H2 = boost::none); - struct PointPrior3D: public NonlinearFactor1 { + struct PointPrior3D: public 
NonlinearFactor1 { Point3 z_; PointPrior3D(const Point3& z, const SharedGaussian& model, const PoseKey& j) : - NonlinearFactor1 (model, j), z_(z) { + NonlinearFactor1 (model, j), z_(z) { } Vector evaluateError(const Point3& x, boost::optional H = @@ -61,14 +61,14 @@ namespace simulated3D { } }; - struct Simulated3DMeasurement: public NonlinearFactor2 { Point3 z_; Simulated3DMeasurement(const Point3& z, const SharedGaussian& model, PoseKey& j1, PointKey j2) : - NonlinearFactor2 ( + NonlinearFactor2 ( model, j1, j2), z_(z) { } diff --git a/slam/TransformConstraint.h b/slam/TransformConstraint.h index 6313fbf2e..d8636c2af 100644 --- a/slam/TransformConstraint.h +++ b/slam/TransformConstraint.h @@ -13,7 +13,7 @@ namespace gtsam { /** * A constraint between two landmarks in separate maps * Templated on: - * Config : The overall config + * Values : The overall config * PKey : Key of landmark being constrained * Point : Type of landmark * TKey : Key of transform used @@ -25,15 +25,15 @@ namespace gtsam { * This base class should be specialized to implement the cost function for * specific classes of landmarks */ -template -class TransformConstraint : public NonlinearEqualityConstraint3 { +template +class TransformConstraint : public NonlinearEqualityConstraint3 { public: typedef typename PKey::Value_t Point; typedef typename TKey::Value_t Transform; - typedef NonlinearEqualityConstraint3 Base; - typedef TransformConstraint This; + typedef NonlinearEqualityConstraint3 Base; + typedef TransformConstraint This; /** * General constructor with all of the keys to variables in the map diff --git a/slam/dataset.cpp b/slam/dataset.cpp index 8f0de954c..9b8cc44e0 100644 --- a/slam/dataset.cpp +++ b/slam/dataset.cpp @@ -20,7 +20,7 @@ using namespace gtsam; #define LINESIZE 81920 typedef boost::shared_ptr sharedPose2Graph; -typedef boost::shared_ptr sharedPose2Config; +typedef boost::shared_ptr sharedPose2Values; namespace gtsam { @@ -68,13 +68,13 @@ pair > dataset(const string& 
dataset, c /* ************************************************************************* */ -pair load2D( +pair load2D( pair > dataset, int maxID, bool addNoise, bool smart) { return load2D(dataset.first, dataset.second, maxID, addNoise, smart); } -pair load2D(const string& filename, +pair load2D(const string& filename, boost::optional model, int maxID, bool addNoise, bool smart) { cout << "Will try to read " << filename << endl; ifstream is(filename.c_str()); @@ -83,7 +83,7 @@ pair load2D(const string& filename, exit(-1); } - sharedPose2Config poses(new Pose2Config); + sharedPose2Values poses(new Pose2Values); sharedPose2Graph graph(new Pose2Graph); string tag; @@ -151,14 +151,14 @@ pair load2D(const string& filename, } /* ************************************************************************* */ -void save2D(const Pose2Graph& graph, const Pose2Config& config, const SharedDiagonal model, +void save2D(const Pose2Graph& graph, const Pose2Values& config, const SharedDiagonal model, const string& filename) { - typedef Pose2Config::Key Key; + typedef Pose2Values::Key Key; fstream stream(filename.c_str(), fstream::out); // save poses - Pose2Config::Key key; + Pose2Values::Key key; Pose2 pose; BOOST_FOREACH(boost::tie(key, pose), config) stream << "VERTEX2 " << key.index() << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; @@ -166,7 +166,7 @@ void save2D(const Pose2Graph& graph, const Pose2Config& config, const SharedDiag // save edges Matrix R = model->R(); Matrix RR = prod(trans(R),R); - BOOST_FOREACH(boost::shared_ptr > factor_, graph) { + BOOST_FOREACH(boost::shared_ptr > factor_, graph) { boost::shared_ptr factor = boost::dynamic_pointer_cast(factor_); if (!factor) continue; diff --git a/slam/dataset.h b/slam/dataset.h index 862b36db4..83d93bffd 100644 --- a/slam/dataset.h +++ b/slam/dataset.h @@ -32,16 +32,16 @@ std::pair > * @param maxID, if non-zero cut out vertices >= maxID * @param smart: try to reduce complexity of covariance to cheapest 
model */ -std::pair, boost::shared_ptr > load2D( +std::pair, boost::shared_ptr > load2D( std::pair > dataset, int maxID = 0, bool addNoise=false, bool smart=true); -std::pair, boost::shared_ptr > load2D( +std::pair, boost::shared_ptr > load2D( const std::string& filename, boost::optional model = boost::optional(), int maxID = 0, bool addNoise=false, bool smart=true); /** save 2d graph */ -void save2D(const gtsam::Pose2Graph& graph, const gtsam::Pose2Config& config, const gtsam::SharedDiagonal model, +void save2D(const gtsam::Pose2Graph& graph, const gtsam::Pose2Values& config, const gtsam::SharedDiagonal model, const std::string& filename); /** diff --git a/slam/planarSLAM.cpp b/slam/planarSLAM.cpp index 4eb6a702c..1b103deed 100644 --- a/slam/planarSLAM.cpp +++ b/slam/planarSLAM.cpp @@ -14,14 +14,14 @@ namespace gtsam { using namespace planarSLAM; INSTANTIATE_LIE_CONFIG(PointKey) - INSTANTIATE_TUPLE_CONFIG2(PoseConfig, PointConfig) - INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Config) - INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Config) + INSTANTIATE_TUPLE_CONFIG2(PoseValues, PointValues) + INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Values) + INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Values) namespace planarSLAM { - Graph::Graph(const NonlinearFactorGraph& graph) : - NonlinearFactorGraph(graph) {} + Graph::Graph(const NonlinearFactorGraph& graph) : + NonlinearFactorGraph(graph) {} void Graph::addPrior(const PoseKey& i, const Pose2& p, const SharedGaussian& model) { diff --git a/slam/planarSLAM.h b/slam/planarSLAM.h index cdf02c94a..779f65a9e 100644 --- a/slam/planarSLAM.h +++ b/slam/planarSLAM.h @@ -20,25 +20,25 @@ namespace gtsam { // Use planarSLAM namespace for specific SLAM instance namespace planarSLAM { - // Keys and Config + // Keys and Values typedef TypedSymbol PoseKey; typedef TypedSymbol PointKey; - typedef LieValues PoseConfig; - typedef LieValues PointConfig; - typedef TupleValues2 Config; + typedef LieValues PoseValues; + typedef LieValues PointValues; + typedef TupleValues2 
Values; // Factors - typedef NonlinearEquality Constraint; - typedef PriorFactor Prior; - typedef BetweenFactor Odometry; - typedef BearingFactor Bearing; - typedef RangeFactor Range; - typedef BearingRangeFactor BearingRange; + typedef NonlinearEquality Constraint; + typedef PriorFactor Prior; + typedef BetweenFactor Odometry; + typedef BearingFactor Bearing; + typedef RangeFactor Range; + typedef BearingRangeFactor BearingRange; // Graph - struct Graph: public NonlinearFactorGraph { + struct Graph: public NonlinearFactorGraph { Graph(){} - Graph(const NonlinearFactorGraph& graph); + Graph(const NonlinearFactorGraph& graph); void addPrior(const PoseKey& i, const Pose2& p, const SharedGaussian& model); void addPoseConstraint(const PoseKey& i, const Pose2& p); void addOdometry(const PoseKey& i, const PoseKey& j, const Pose2& z, @@ -52,7 +52,7 @@ namespace gtsam { }; // Optimizer - typedef NonlinearOptimizer Optimizer; + typedef NonlinearOptimizer Optimizer; } // planarSLAM diff --git a/slam/pose2SLAM.cpp b/slam/pose2SLAM.cpp index b5c3a047a..5de15ddad 100644 --- a/slam/pose2SLAM.cpp +++ b/slam/pose2SLAM.cpp @@ -14,14 +14,14 @@ namespace gtsam { using namespace pose2SLAM; INSTANTIATE_LIE_CONFIG(Key) - INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Config) - INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Config) + INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Values) + INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Values) namespace pose2SLAM { /* ************************************************************************* */ - Config circle(size_t n, double R) { - Config x; + Values circle(size_t n, double R) { + Values x; double theta = 0, dtheta = 2 * M_PI / n; for (size_t i = 0; i < n; i++, theta += dtheta) x.insert(i, Pose2(cos(theta), sin(theta), M_PI_2 + theta)); diff --git a/slam/pose2SLAM.h b/slam/pose2SLAM.h index 288805e06..6d0af4b4f 100644 --- a/slam/pose2SLAM.h +++ b/slam/pose2SLAM.h @@ -21,9 +21,9 @@ namespace gtsam { // Use pose2SLAM namespace for specific SLAM instance namespace pose2SLAM { - 
// Keys and Config + // Keys and Values typedef TypedSymbol Key; - typedef LieValues Config; + typedef LieValues Values; /** * Create a circle of n 2D poses tangent to circle of radius R, first pose at (R,0) @@ -32,16 +32,16 @@ namespace gtsam { * @param c character to use for keys * @return circle of n 2D poses */ - Config circle(size_t n, double R); + Values circle(size_t n, double R); // Factors - typedef PriorFactor Prior; - typedef BetweenFactor Constraint; - typedef NonlinearEquality HardConstraint; + typedef PriorFactor Prior; + typedef BetweenFactor Constraint; + typedef NonlinearEquality HardConstraint; // Graph - struct Graph: public NonlinearFactorGraph { - typedef BetweenFactor Constraint; + struct Graph: public NonlinearFactorGraph { + typedef BetweenFactor Constraint; typedef Pose2 Pose; void addPrior(const Key& i, const Pose2& p, const SharedGaussian& model); void addConstraint(const Key& i, const Key& j, const Pose2& z, const SharedGaussian& model); @@ -49,14 +49,14 @@ namespace gtsam { }; // Optimizer - typedef NonlinearOptimizer Optimizer; + typedef NonlinearOptimizer Optimizer; } // pose2SLAM /** * Backwards compatibility */ - typedef pose2SLAM::Config Pose2Config; + typedef pose2SLAM::Values Pose2Values; typedef pose2SLAM::Prior Pose2Prior; typedef pose2SLAM::Constraint Pose2Factor; typedef pose2SLAM::Graph Pose2Graph; diff --git a/slam/pose3SLAM.cpp b/slam/pose3SLAM.cpp index da2590543..a803d9998 100644 --- a/slam/pose3SLAM.cpp +++ b/slam/pose3SLAM.cpp @@ -14,14 +14,14 @@ namespace gtsam { using namespace pose3SLAM; INSTANTIATE_LIE_CONFIG(Key) - INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Config) - INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Config) + INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Values) + INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Values) namespace pose3SLAM { /* ************************************************************************* */ - Config circle(size_t n, double radius) { - Config x; + Values circle(size_t n, double radius) { + Values x; double 
theta = 0, dtheta = 2 * M_PI / n; // We use aerospace/navlab convention, X forward, Y right, Z down // First pose will be at (R,0,0) diff --git a/slam/pose3SLAM.h b/slam/pose3SLAM.h index d3cf818d1..4f4593f76 100644 --- a/slam/pose3SLAM.h +++ b/slam/pose3SLAM.h @@ -20,9 +20,9 @@ namespace gtsam { // Use pose3SLAM namespace for specific SLAM instance namespace pose3SLAM { - // Keys and Config + // Keys and Values typedef TypedSymbol Key; - typedef LieValues Config; + typedef LieValues Values; /** * Create a circle of n 3D poses tangent to circle of radius R, first pose at (R,0) @@ -31,15 +31,15 @@ namespace gtsam { * @param c character to use for keys * @return circle of n 3D poses */ - Config circle(size_t n, double R); + Values circle(size_t n, double R); // Factors - typedef PriorFactor Prior; - typedef BetweenFactor Constraint; - typedef NonlinearEquality HardConstraint; + typedef PriorFactor Prior; + typedef BetweenFactor Constraint; + typedef NonlinearEquality HardConstraint; // Graph - struct Graph: public NonlinearFactorGraph { + struct Graph: public NonlinearFactorGraph { void addPrior(const Key& i, const Pose3& p, const SharedGaussian& model); void addConstraint(const Key& i, const Key& j, const Pose3& z, @@ -48,14 +48,14 @@ namespace gtsam { }; // Optimizer - typedef NonlinearOptimizer Optimizer; + typedef NonlinearOptimizer Optimizer; } // pose3SLAM /** * Backwards compatibility */ - typedef pose3SLAM::Config Pose3Config; + typedef pose3SLAM::Values Pose3Values; typedef pose3SLAM::Prior Pose3Prior; typedef pose3SLAM::Constraint Pose3Factor; typedef pose3SLAM::Graph Pose3Graph; diff --git a/slam/saveGraph.cpp b/slam/saveGraph.cpp index 6ce97c3b4..585b63933 100644 --- a/slam/saveGraph.cpp +++ b/slam/saveGraph.cpp @@ -20,7 +20,7 @@ namespace gtsam { INSTANTIATE_LIE_CONFIG(Symbol, Point2) /* ************************************************************************* */ - void saveGraph(const SymbolicFactorGraph& fg, const SymbolicConfig& config, const 
std::string& s) { + void saveGraph(const SymbolicFactorGraph& fg, const SymbolicValues& config, const std::string& s) { Symbol key; Point2 pt; diff --git a/slam/saveGraph.h b/slam/saveGraph.h index f6b12479e..8c62111d8 100644 --- a/slam/saveGraph.h +++ b/slam/saveGraph.h @@ -17,9 +17,9 @@ namespace gtsam { class Point2; - typedef LieValues SymbolicConfig; + typedef LieValues SymbolicValues; // save graph to the graphviz format - void saveGraph(const SymbolicFactorGraph& fg, const SymbolicConfig& config, const std::string& s); + void saveGraph(const SymbolicFactorGraph& fg, const SymbolicValues& config, const std::string& s); } // namespace gtsam diff --git a/slam/simulated2D.cpp b/slam/simulated2D.cpp index 85702cf67..f4836fc74 100644 --- a/slam/simulated2D.cpp +++ b/slam/simulated2D.cpp @@ -13,9 +13,9 @@ namespace gtsam { using namespace simulated2D; // INSTANTIATE_LIE_CONFIG(PointKey) INSTANTIATE_LIE_CONFIG(PoseKey) - INSTANTIATE_TUPLE_CONFIG2(PoseConfig, PointConfig) -// INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Config) -// INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Config) + INSTANTIATE_TUPLE_CONFIG2(PoseValues, PointValues) +// INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Values) +// INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Values) namespace simulated2D { diff --git a/slam/simulated2D.h b/slam/simulated2D.h index 77ad1344e..80fe6b224 100644 --- a/slam/simulated2D.h +++ b/slam/simulated2D.h @@ -21,9 +21,9 @@ namespace gtsam { // Simulated2D robots have no orientation, just a position typedef TypedSymbol PoseKey; typedef TypedSymbol PointKey; - typedef LieValues PoseConfig; - typedef LieValues PointConfig; - typedef TupleValues2 Config; + typedef LieValues PoseValues; + typedef LieValues PointValues; + typedef TupleValues2 Values; /** * Prior on a single pose, and optional derivative version @@ -54,7 +54,7 @@ namespace gtsam { /** * Unary factor encoding a soft prior on a vector */ - template + template struct GenericPrior: public NonlinearFactor1 { typedef boost::shared_ptr > 
shared_ptr; typedef typename Key::Value_t Pose; @@ -74,7 +74,7 @@ namespace gtsam { /** * Binary factor simulating "odometry" between two Vectors */ - template + template struct GenericOdometry: public NonlinearFactor2 { typedef boost::shared_ptr > shared_ptr; typedef typename Key::Value_t Pose; @@ -95,7 +95,7 @@ namespace gtsam { /** * Binary factor simulating "measurement" between two Vectors */ - template + template class GenericMeasurement: public NonlinearFactor2 { public: typedef boost::shared_ptr > shared_ptr; @@ -117,9 +117,9 @@ namespace gtsam { }; /** Typedefs for regular use */ - typedef GenericPrior Prior; - typedef GenericOdometry Odometry; - typedef GenericMeasurement Measurement; + typedef GenericPrior Prior; + typedef GenericOdometry Odometry; + typedef GenericMeasurement Measurement; } // namespace simulated2D } // namespace gtsam diff --git a/slam/simulated2DConstraints.h b/slam/simulated2DConstraints.h index d3995dc5c..adc04d715 100644 --- a/slam/simulated2DConstraints.h +++ b/slam/simulated2DConstraints.h @@ -24,13 +24,13 @@ namespace gtsam { namespace equality_constraints { /** Typedefs for regular use */ - typedef NonlinearEquality1 UnaryEqualityConstraint; - typedef NonlinearEquality1 UnaryEqualityPointConstraint; - typedef BetweenConstraint OdoEqualityConstraint; + typedef NonlinearEquality1 UnaryEqualityConstraint; + typedef NonlinearEquality1 UnaryEqualityPointConstraint; + typedef BetweenConstraint OdoEqualityConstraint; /** Equality between variables */ - typedef NonlinearEquality2 PoseEqualityConstraint; - typedef NonlinearEquality2 PointEqualityConstraint; + typedef NonlinearEquality2 PoseEqualityConstraint; + typedef NonlinearEquality2 PointEqualityConstraint; } // \namespace equality_constraints @@ -65,8 +65,8 @@ namespace gtsam { }; /** typedefs for use with simulated2D systems */ - typedef ScalarCoordConstraint1 PoseXInequality; - typedef ScalarCoordConstraint1 PoseYInequality; + typedef ScalarCoordConstraint1 PoseXInequality; + 
typedef ScalarCoordConstraint1 PoseYInequality; double range(const Point2& a, const Point2& b) { return a.dist(b); } @@ -91,7 +91,7 @@ namespace gtsam { } }; - typedef MaxDistanceConstraint PoseMaxDistConstraint; + typedef MaxDistanceConstraint PoseMaxDistConstraint; /** * Binary inequality constraint forcing a minimum range @@ -114,7 +114,7 @@ namespace gtsam { } }; - typedef MinDistanceConstraint LandmarkAvoid; + typedef MinDistanceConstraint LandmarkAvoid; } // \namespace inequality_constraints diff --git a/slam/simulated2DOriented.cpp b/slam/simulated2DOriented.cpp index 80d1382ce..aec537abf 100644 --- a/slam/simulated2DOriented.cpp +++ b/slam/simulated2DOriented.cpp @@ -11,8 +11,8 @@ namespace gtsam { using namespace simulated2DOriented; // INSTANTIATE_LIE_CONFIG(PointKey) -// INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Config) -// INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Config) +// INSTANTIATE_NONLINEAR_FACTOR_GRAPH(Values) +// INSTANTIATE_NONLINEAR_OPTIMIZER(Graph, Values) namespace simulated2DOriented { diff --git a/slam/simulated2DOriented.h b/slam/simulated2DOriented.h index 1801b5228..1262857ed 100644 --- a/slam/simulated2DOriented.h +++ b/slam/simulated2DOriented.h @@ -21,9 +21,9 @@ namespace gtsam { // The types that take an oriented pose2 rather than point2 typedef TypedSymbol PointKey; typedef TypedSymbol PoseKey; - typedef LieValues PoseConfig; - typedef LieValues PointConfig; - typedef TupleValues2 Config; + typedef LieValues PoseValues; + typedef LieValues PointValues; + typedef TupleValues2 Values; //TODO:: point prior is not implemented right now @@ -47,7 +47,7 @@ namespace gtsam { /** * Unary factor encoding a soft prior on a vector */ - template + template struct GenericPosePrior: public NonlinearFactor1 { Pose2 z_; @@ -66,7 +66,7 @@ namespace gtsam { /** * Binary factor simulating "odometry" between two Vectors */ - template + template struct GenericOdometry: public NonlinearFactor2 { Pose2 z_; @@ -82,7 +82,7 @@ namespace gtsam { }; - typedef 
GenericOdometry Odometry; + typedef GenericOdometry Odometry; } // namespace simulated2DOriented } // namespace gtsam diff --git a/slam/smallExample.cpp b/slam/smallExample.cpp index a74a6e242..7e22e51eb 100644 --- a/slam/smallExample.cpp +++ b/slam/smallExample.cpp @@ -28,7 +28,7 @@ using namespace std; namespace gtsam { namespace example { - typedef boost::shared_ptr > shared; + typedef boost::shared_ptr > shared; static SharedDiagonal sigma1_0 = noiseModel::Isotropic::Sigma(2,1.0); static SharedDiagonal sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1); @@ -73,8 +73,8 @@ namespace example { } /* ************************************************************************* */ - Config createConfig() { - Config c; + Values createValues() { + Values c; c.insert(simulated2D::PoseKey(1), Point2(0.0, 0.0)); c.insert(simulated2D::PoseKey(2), Point2(1.5, 0.0)); c.insert(simulated2D::PointKey(1), Point2(0.0, -1.0)); @@ -82,8 +82,8 @@ namespace example { } /* ************************************************************************* */ - VectorConfig createVectorConfig() { - VectorConfig c(vector(3, 2)); + VectorValues createVectorValues() { + VectorValues c(vector(3, 2)); c[_l1_] = Vector_(2, 0.0, -1.0); c[_x1_] = Vector_(2, 0.0, 0.0); c[_x2_] = Vector_(2, 1.5, 0.0); @@ -91,8 +91,8 @@ namespace example { } /* ************************************************************************* */ - boost::shared_ptr sharedNoisyConfig() { - boost::shared_ptr c(new Config); + boost::shared_ptr sharedNoisyValues() { + boost::shared_ptr c(new Values); c->insert(simulated2D::PoseKey(1), Point2(0.1, 0.1)); c->insert(simulated2D::PoseKey(2), Point2(1.4, 0.2)); c->insert(simulated2D::PointKey(1), Point2(0.1, -1.1)); @@ -100,13 +100,13 @@ namespace example { } /* ************************************************************************* */ - Config createNoisyConfig() { - return *sharedNoisyConfig(); + Values createNoisyValues() { + return *sharedNoisyValues(); } /* 
************************************************************************* */ - VectorConfig createCorrectDelta(const Ordering& ordering) { - VectorConfig c(vector(3,2)); + VectorValues createCorrectDelta(const Ordering& ordering) { + VectorValues c(vector(3,2)); c[ordering["l1"]] = Vector_(2, -0.1, 0.1); c[ordering["x1"]] = Vector_(2, -0.1, -0.1); c[ordering["x2"]] = Vector_(2, 0.1, -0.2); @@ -114,8 +114,8 @@ namespace example { } /* ************************************************************************* */ - VectorConfig createZeroDelta(const Ordering& ordering) { - VectorConfig c(vector(3,2)); + VectorValues createZeroDelta(const Ordering& ordering) { + VectorValues c(vector(3,2)); c[ordering["l1"]] = zero(2); c[ordering["x1"]] = zero(2); c[ordering["x2"]] = zero(2); @@ -195,14 +195,14 @@ namespace example { 0.0, cos(v.y())); } - struct UnaryFactor: public gtsam::NonlinearFactor1 { Point2 z_; UnaryFactor(const Point2& z, const SharedGaussian& model, const simulated2D::PoseKey& key) : - gtsam::NonlinearFactor1(model, key), z_(z) { + gtsam::NonlinearFactor1(model, key), z_(z) { } Vector evaluateError(const Point2& x, boost::optional A = @@ -231,11 +231,11 @@ namespace example { } /* ************************************************************************* */ - pair createNonlinearSmoother(int T) { + pair createNonlinearSmoother(int T) { // Create Graph nlfg; - Config poses; + Values poses; // prior on x1 Point2 x1(1.0, 0.0); @@ -264,7 +264,7 @@ namespace example { /* ************************************************************************* */ pair createSmoother(int T, boost::optional ordering) { Graph nlfg; - Config poses; + Values poses; boost::tie(nlfg, poses) = createNonlinearSmoother(T); if(!ordering) ordering = *poses.orderingArbitrary(); @@ -300,8 +300,8 @@ namespace example { } /* ************************************************************************* */ - VectorConfig createSimpleConstraintConfig() { - VectorConfig config(vector(2,2)); + VectorValues 
createSimpleConstraintValues() { + VectorValues config(vector(2,2)); Vector v = Vector_(2, 1.0, -1.0); config[_x_] = v; config[_y_] = v; @@ -342,8 +342,8 @@ namespace example { } /* ************************************************************************* */ - VectorConfig createSingleConstraintConfig() { - VectorConfig config(vector(2,2)); + VectorValues createSingleConstraintValues() { + VectorValues config(vector(2,2)); config[_x_] = Vector_(2, 1.0, -1.0); config[_y_] = Vector_(2, 0.2, 0.1); return config; @@ -404,8 +404,8 @@ namespace example { } /* ************************************************************************* */ - VectorConfig createMultiConstraintConfig() { - VectorConfig config(vector(3,2)); + VectorValues createMultiConstraintValues() { + VectorValues config(vector(3,2)); config[_x_] = Vector_(2, -2.0, 2.0); config[_y_] = Vector_(2, -0.1, 0.4); config[_z_] = Vector_(2, -4.0, 5.0); @@ -437,9 +437,9 @@ namespace example { //} /* ************************************************************************* */ - // ConstrainedNonlinearFactorGraph , VectorConfig> createConstrainedNonlinearFactorGraph() { - // ConstrainedNonlinearFactorGraph , VectorConfig> graph; - // VectorConfig c = createConstrainedConfig(); + // ConstrainedNonlinearFactorGraph , VectorValues> createConstrainedNonlinearFactorGraph() { + // ConstrainedNonlinearFactorGraph , VectorValues> graph; + // VectorValues c = createConstrainedValues(); // // // equality constraint for initial pose // GaussianFactor::shared_ptr f1(new GaussianFactor(c["x0"], "x0")); @@ -453,9 +453,9 @@ namespace example { // } /* ************************************************************************* */ - //VectorConfig createConstrainedConfig() + //VectorValues createConstrainedValues() //{ - // VectorConfig config; + // VectorValues config; // // Vector x0(2); x0(0)=1.0; x0(1)=2.0; // config.insert("x0", x0); @@ -467,9 +467,9 @@ namespace example { //} /* 
************************************************************************* */ - //VectorConfig createConstrainedLinConfig() + //VectorValues createConstrainedLinValues() //{ - // VectorConfig config; + // VectorValues config; // // Vector x0(2); x0(0)=1.0; x0(1)=2.0; // value doesn't actually matter // config.insert("x0", x0); @@ -481,9 +481,9 @@ namespace example { //} /* ************************************************************************* */ - //VectorConfig createConstrainedCorrectDelta() + //VectorValues createConstrainedCorrectDelta() //{ - // VectorConfig config; + // VectorValues config; // // Vector x0(2); x0(0)=0.; x0(1)=0.; // config.insert("x0", x0); @@ -498,7 +498,7 @@ namespace example { //ConstrainedGaussianBayesNet createConstrainedGaussianBayesNet() //{ // ConstrainedGaussianBayesNet cbn; - // VectorConfig c = createConstrainedConfig(); + // VectorValues c = createConstrainedValues(); // // // add regular conditional gaussian - no parent // Matrix R = eye(2); @@ -521,10 +521,10 @@ namespace example { } /* ************************************************************************* */ - boost::tuple planarGraph(size_t N) { + boost::tuple planarGraph(size_t N) { // create empty graph - NonlinearFactorGraph nlfg; + NonlinearFactorGraph nlfg; // Create almost hard constraint on x11, sigma=0 will work for PCG not for normal shared constraint(new simulated2D::Prior(Point2(1.0, 1.0), sharedSigma(2,1e-3), key(1,1))); @@ -547,12 +547,12 @@ namespace example { } // Create linearization and ground xtrue config - Config zeros; + Values zeros; for (size_t x = 1; x <= N; x++) for (size_t y = 1; y <= N; y++) zeros.insert(key(x, y), Point2()); Ordering ordering(planarOrdering(N)); - VectorConfig xtrue(zeros.dims(ordering)); + VectorValues xtrue(zeros.dims(ordering)); for (size_t x = 1; x <= N; x++) for (size_t y = 1; y <= N; y++) xtrue[ordering[key(x, y)]] = Point2(x,y).vector(); diff --git a/slam/smallExample.h b/slam/smallExample.h index f4f1be672..200565162 
100644 --- a/slam/smallExample.h +++ b/slam/smallExample.h @@ -20,8 +20,8 @@ namespace gtsam { namespace example { - typedef simulated2D::Config Config; - typedef NonlinearFactorGraph Graph; + typedef simulated2D::Values Values; + typedef NonlinearFactorGraph Graph; /** * Create small example for non-linear factor graph @@ -30,33 +30,33 @@ namespace gtsam { Graph createNonlinearFactorGraph(); /** - * Create configuration to go with it - * The ground truth configuration for the example above + * Create values structure to go with it + * The ground truth values structure for the example above */ - Config createConfig(); + Values createValues(); - /** Vector Config equivalent */ - VectorConfig createVectorConfig(); + /** Vector Values equivalent */ + VectorValues createVectorValues(); /** - * create a noisy configuration for a nonlinear factor graph + * create a noisy values structure for a nonlinear factor graph */ - boost::shared_ptr sharedNoisyConfig(); - Config createNoisyConfig(); + boost::shared_ptr sharedNoisyValues(); + Values createNoisyValues(); /** * Zero delta config */ - VectorConfig createZeroDelta(const Ordering& ordering); + VectorValues createZeroDelta(const Ordering& ordering); /** - * Delta config that, when added to noisyConfig, returns the ground truth + * Delta config that, when added to noisyValues, returns the ground truth */ - VectorConfig createCorrectDelta(const Ordering& ordering); + VectorValues createCorrectDelta(const Ordering& ordering); /** * create a linear factor graph - * The non-linear graph above evaluated at NoisyConfig + * The non-linear graph above evaluated at NoisyValues */ GaussianFactorGraph createGaussianFactorGraph(const Ordering& ordering); @@ -76,7 +76,7 @@ namespace gtsam { * Create a full nonlinear smoother * @param T number of time-steps */ - std::pair createNonlinearSmoother(int T); + std::pair createNonlinearSmoother(int T); /** * Create a Kalman smoother by linearizing a non-linear factor graph @@ -93,21 +93,21 @@ 
namespace gtsam { * one binary equality constraint that sets x = y */ GaussianFactorGraph createSimpleConstraintGraph(); - VectorConfig createSimpleConstraintConfig(); + VectorValues createSimpleConstraintValues(); /** * Creates a simple constrained graph with one linear factor and * one binary constraint. */ GaussianFactorGraph createSingleConstraintGraph(); - VectorConfig createSingleConstraintConfig(); + VectorValues createSingleConstraintValues(); /** * Creates a constrained graph with a linear factor and two * binary constraints that share a node */ GaussianFactorGraph createMultiConstraintGraph(); - VectorConfig createMultiConstraintConfig(); + VectorValues createMultiConstraintValues(); /* ******************************************************* */ // Planar graph with easy subtree for SubgraphPreconditioner @@ -122,7 +122,7 @@ namespace gtsam { * -x11-x21-x31 * with x11 clamped at (1,1), and others related by 2D odometry. */ - boost::tuple planarGraph(size_t N); + boost::tuple planarGraph(size_t N); /* * Create canonical ordering for planar graph that also works for tree diff --git a/slam/tests/testGaussianISAM2.cpp b/slam/tests/testGaussianISAM2.cpp index 859d25f51..c39756620 100644 --- a/slam/tests/testGaussianISAM2.cpp +++ b/slam/tests/testGaussianISAM2.cpp @@ -28,18 +28,18 @@ const double tol = 1e-4; //TEST( ISAM2, solving ) //{ // Graph nlfg = createNonlinearFactorGraph(); -// Config noisy = createNoisyConfig(); +// Values noisy = createNoisyValues(); // Ordering ordering; // ordering += symbol('x', 1); // ordering += symbol('x', 2); // ordering += symbol('l', 1); // // FIXME: commented out due due to compile error in ISAM - this should be fixed //// GaussianISAM2 btree(nlfg, ordering, noisy); -//// VectorConfig actualDelta = optimize2(btree); -//// VectorConfig delta = createCorrectDelta(); +//// VectorValues actualDelta = optimize2(btree); +//// VectorValues delta = createCorrectDelta(); //// CHECK(assert_equal(delta, actualDelta, 0.01)); -//// Config 
actualSolution = noisy.expmap(actualDelta); -//// Config solution = createConfig(); +//// Values actualSolution = noisy.expmap(actualDelta); +//// Values solution = createValues(); //// CHECK(assert_equal(solution, actualSolution, tol)); //} // @@ -48,12 +48,12 @@ const double tol = 1e-4; //{ // // Create smoother with 7 nodes // Graph smoother; -// Config poses; +// Values poses; // boost::tie(smoother, poses) = createNonlinearSmoother(7); // // // run ISAM2 for every factor // GaussianISAM2 actual; -// BOOST_FOREACH(boost::shared_ptr > factor, smoother) { +// BOOST_FOREACH(boost::shared_ptr > factor, smoother) { // Graph factorGraph; // factorGraph.push_back(factor); // actual.update(factorGraph, poses); @@ -68,12 +68,12 @@ const double tol = 1e-4; // CHECK(assert_equal(expected, actual)); // // // obtain solution -// VectorConfig e; // expected solution +// VectorValues e; // expected solution // Vector v = Vector_(2, 0., 0.); // // FIXME: commented out due due to compile error in ISAM - this should be fixed //// for (int i=1; i<=7; i++) //// e.insert(symbol('x', i), v); -//// VectorConfig optimized = optimize2(actual); // actual solution +//// VectorValues optimized = optimize2(actual); // actual solution //// CHECK(assert_equal(e, optimized)); //} // @@ -82,7 +82,7 @@ const double tol = 1e-4; //{ // // Create smoother with 7 nodes // Graph smoother; -// Config poses; +// Values poses; // boost::tie(smoother, poses) = createNonlinearSmoother(7); // // // Create initial tree from first 4 timestamps in reverse order ! 
@@ -111,8 +111,8 @@ TEST(ISAM2, slamlike_solution) typedef planarSLAM::PointKey PointKey; double wildfire = -1.0; - planarSLAM::Config init; - planarSLAM::Config fullinit; + planarSLAM::Values init; + planarSLAM::Values fullinit; GaussianISAM2_P isam; planarSLAM::Graph newfactors; planarSLAM::Graph fullgraph; @@ -184,15 +184,15 @@ TEST(ISAM2, slamlike_solution) // isam.update(newfactors, init, wildfire, 0.0, true); // Compare solutions - planarSLAM::Config actual = isam.calculateEstimate(); + planarSLAM::Values actual = isam.calculateEstimate(); Ordering ordering = isam.getOrdering(); // *fullgraph.orderingCOLAMD(fullinit).first; GaussianFactorGraph linearized = *fullgraph.linearize(fullinit, ordering); // linearized.print("Expected linearized: "); GaussianBayesNet gbn = *Inference::Eliminate(linearized); // gbn.print("Expected bayesnet: "); - VectorConfig delta = optimize(gbn); - planarSLAM::Config expected = fullinit.expmap(delta, ordering); -// planarSLAM::Config expected = *NonlinearOptimizer::optimizeLM(fullgraph, fullinit); + VectorValues delta = optimize(gbn); + planarSLAM::Values expected = fullinit.expmap(delta, ordering); +// planarSLAM::Values expected = *NonlinearOptimizer::optimizeLM(fullgraph, fullinit); CHECK(assert_equal(expected, actual)); } diff --git a/slam/tests/testPlanarSLAM.cpp b/slam/tests/testPlanarSLAM.cpp index 1f4a2b8d9..afb3c0495 100644 --- a/slam/tests/testPlanarSLAM.cpp +++ b/slam/tests/testPlanarSLAM.cpp @@ -29,7 +29,7 @@ TEST( planarSLAM, BearingFactor ) planarSLAM::Bearing factor(2, 3, z, sigma); // create config - planarSLAM::Config c; + planarSLAM::Values c; c.insert(2, x2); c.insert(3, l3); @@ -46,7 +46,7 @@ TEST( planarSLAM, RangeFactor ) planarSLAM::Range factor(2, 3, z, sigma); // create config - planarSLAM::Config c; + planarSLAM::Values c; c.insert(2, x2); c.insert(3, l3); @@ -64,7 +64,7 @@ TEST( planarSLAM, BearingRangeFactor ) planarSLAM::BearingRange factor(2, 3, r, b, sigma2); // create config - planarSLAM::Config c; + 
planarSLAM::Values c; c.insert(2, x2); c.insert(3, l3); @@ -77,7 +77,7 @@ TEST( planarSLAM, BearingRangeFactor ) TEST( planarSLAM, constructor ) { // create config - planarSLAM::Config c; + planarSLAM::Values c; c.insert(2, x2); c.insert(3, x3); c.insert(3, l3); diff --git a/slam/tests/testPose2Factor.cpp b/slam/tests/testPose2Factor.cpp index 3c1262228..2f022be23 100644 --- a/slam/tests/testPose2Factor.cpp +++ b/slam/tests/testPose2Factor.cpp @@ -31,7 +31,7 @@ TEST( Pose2Factor, error ) // Choose a linearization point Pose2 p1; // robot at origin Pose2 p2(1, 0, 0); // robot at (1,0) - Pose2Config x0; + Pose2Values x0; x0.insert(1, p1); x0.insert(2, p2); @@ -44,7 +44,7 @@ TEST( Pose2Factor, error ) boost::shared_ptr linear = factor.linearize(x0, ordering); // Check error at x0, i.e. delta = zero ! - VectorConfig delta(x0.dims(ordering)); + VectorValues delta(x0.dims(ordering)); delta[ordering["x1"]] = zero(3); delta[ordering["x2"]] = zero(3); Vector error_at_zero = Vector_(3,0.0,0.0,0.0); @@ -52,9 +52,9 @@ TEST( Pose2Factor, error ) CHECK(assert_equal(-error_at_zero,linear->error_vector(delta))); // Check error after increasing p2 - VectorConfig plus = delta; + VectorValues plus = delta; plus[ordering["x2"]] = Vector_(3, 0.1, 0.0, 0.0); - Pose2Config x1 = x0.expmap(plus, ordering); + Pose2Values x1 = x0.expmap(plus, ordering); Vector error_at_plus = Vector_(3,0.1/sx,0.0,0.0); // h(x)-z = 0.1 ! 
CHECK(assert_equal(error_at_plus,factor.whitenedError(x1))); CHECK(assert_equal(error_at_plus,linear->error_vector(plus))); @@ -71,7 +71,7 @@ TEST( Pose2Factor, rhs ) // Choose a linearization point Pose2 p1(1.1,2,M_PI_2); // robot at (1.1,2) looking towards y (ground truth is at 1,2, see testPose2) Pose2 p2(-1,4.1,M_PI); // robot at (-1,4.1) looking at negative (ground truth is at -1,4) - Pose2Config x0; + Pose2Values x0; x0.insert(1,p1); x0.insert(2,p2); @@ -100,7 +100,7 @@ TEST( Pose2Factor, linearize ) // Choose a linearization point at ground truth Pose2 p1(1,2,M_PI_2); Pose2 p2(-1,4,M_PI); - Pose2Config x0; + Pose2Values x0; x0.insert(1,p1); x0.insert(2,p2); diff --git a/slam/tests/testPose2Prior.cpp b/slam/tests/testPose2Prior.cpp index 80e320152..a8822193e 100644 --- a/slam/tests/testPose2Prior.cpp +++ b/slam/tests/testPose2Prior.cpp @@ -25,7 +25,7 @@ TEST( Pose2Prior, error ) { // Choose a linearization point Pose2 p1(1, 0, 0); // robot at (1,0) - Pose2Config x0; + Pose2Values x0; x0.insert(1, p1); // Create factor @@ -36,7 +36,7 @@ TEST( Pose2Prior, error ) boost::shared_ptr linear = factor.linearize(x0, ordering); // Check error at x0, i.e. delta = zero ! - VectorConfig delta(x0.dims(ordering)); + VectorValues delta(x0.dims(ordering)); delta.makeZero(); delta[ordering["x1"]] = zero(3); Vector error_at_zero = Vector_(3,0.0,0.0,0.0); @@ -44,11 +44,11 @@ TEST( Pose2Prior, error ) CHECK(assert_equal(-error_at_zero,linear->error_vector(delta))); // Check error after increasing p2 - VectorConfig addition(x0.dims(ordering)); + VectorValues addition(x0.dims(ordering)); addition.makeZero(); addition[ordering["x1"]] = Vector_(3, 0.1, 0.0, 0.0); - VectorConfig plus = delta + addition; - Pose2Config x1 = x0.expmap(plus, ordering); + VectorValues plus = delta + addition; + Pose2Values x1 = x0.expmap(plus, ordering); Vector error_at_plus = Vector_(3,0.1/sx,0.0,0.0); // h(x)-z = 0.1 ! 
CHECK(assert_equal(error_at_plus,factor.whitenedError(x1))); CHECK(assert_equal(error_at_plus,linear->error_vector(plus))); @@ -70,7 +70,7 @@ LieVector h(const Pose2& p1) { TEST( Pose2Prior, linearize ) { // Choose a linearization point at ground truth - Pose2Config x0; + Pose2Values x0; x0.insert(1,prior); // Actual linearization diff --git a/slam/tests/testPose2SLAM.cpp b/slam/tests/testPose2SLAM.cpp index 629e61d7c..a0dade462 100644 --- a/slam/tests/testPose2SLAM.cpp +++ b/slam/tests/testPose2SLAM.cpp @@ -58,7 +58,7 @@ TEST( Pose2Graph, linearization ) // Choose a linearization point Pose2 p1(1.1,2,M_PI_2); // robot at (1.1,2) looking towards y (ground truth is at 1,2, see testPose2) Pose2 p2(-1,4.1,M_PI); // robot at (-1,4) looking at negative (ground truth is at 4.1,2) - Pose2Config config; + Pose2Values config; config.insert(1,p1); config.insert(2,p2); // Linearize @@ -94,14 +94,14 @@ TEST(Pose2Graph, optimize) { fg->addConstraint(0, 1, Pose2(1,2,M_PI_2), covariance); // Create initial config - boost::shared_ptr initial(new Pose2Config()); + boost::shared_ptr initial(new Pose2Values()); initial->insert(0, Pose2(0,0,0)); initial->insert(1, Pose2(0,0,0)); // Choose an ordering and optimize shared_ptr ordering(new Ordering); *ordering += "x0","x1"; - typedef NonlinearOptimizer Optimizer; + typedef NonlinearOptimizer Optimizer; Optimizer::shared_solver solver(new Optimizer::solver(ordering)); Optimizer optimizer0(fg, initial, solver); Optimizer::verbosityLevel verbosity = Optimizer::SILENT; @@ -109,7 +109,7 @@ TEST(Pose2Graph, optimize) { Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); // Check with expected config - Pose2Config expected; + Pose2Values expected; expected.insert(0, Pose2(0,0,0)); expected.insert(1, Pose2(1,2,M_PI_2)); CHECK(assert_equal(expected, *optimizer.config())); @@ -120,7 +120,7 @@ TEST(Pose2Graph, optimize) { TEST(Pose2Graph, optimizeThreePoses) { // Create a hexagon of poses - Pose2Config hexagon = 
pose2SLAM::circle(3,1.0); + Pose2Values hexagon = pose2SLAM::circle(3,1.0); Pose2 p0 = hexagon[0], p1 = hexagon[1]; // create a Pose graph with one equality constraint and one measurement @@ -132,7 +132,7 @@ TEST(Pose2Graph, optimizeThreePoses) { fg->addConstraint(2, 0, delta, covariance); // Create initial config - boost::shared_ptr initial(new Pose2Config()); + boost::shared_ptr initial(new Pose2Values()); initial->insert(0, p0); initial->insert(1, hexagon[1].expmap(Vector_(3,-0.1, 0.1,-0.1))); initial->insert(2, hexagon[2].expmap(Vector_(3, 0.1,-0.1, 0.1))); @@ -147,7 +147,7 @@ TEST(Pose2Graph, optimizeThreePoses) { pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT; pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); - Pose2Config actual = *optimizer.config(); + Pose2Values actual = *optimizer.config(); // Check with ground truth CHECK(assert_equal(hexagon, actual)); @@ -158,7 +158,7 @@ TEST(Pose2Graph, optimizeThreePoses) { TEST(Pose2Graph, optimizeCircle) { // Create a hexagon of poses - Pose2Config hexagon = pose2SLAM::circle(6,1.0); + Pose2Values hexagon = pose2SLAM::circle(6,1.0); Pose2 p0 = hexagon[0], p1 = hexagon[1]; // create a Pose graph with one equality constraint and one measurement @@ -173,7 +173,7 @@ TEST(Pose2Graph, optimizeCircle) { fg->addConstraint(5, 0, delta, covariance); // Create initial config - boost::shared_ptr initial(new Pose2Config()); + boost::shared_ptr initial(new Pose2Values()); initial->insert(0, p0); initial->insert(1, hexagon[1].expmap(Vector_(3,-0.1, 0.1,-0.1))); initial->insert(2, hexagon[2].expmap(Vector_(3, 0.1,-0.1, 0.1))); @@ -191,7 +191,7 @@ TEST(Pose2Graph, optimizeCircle) { pose2SLAM::Optimizer::verbosityLevel verbosity = pose2SLAM::Optimizer::SILENT; pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); - Pose2Config actual = *optimizer.config(); + Pose2Values actual = *optimizer.config(); // Check with ground truth 
CHECK(assert_equal(hexagon, actual)); @@ -225,7 +225,7 @@ TEST(Pose2Graph, optimizeCircle) { // // myOptimizer.update(x); // -// Pose2Config expected; +// Pose2Values expected; // expected.insert(0, Pose2(0.,0.,0.)); // expected.insert(1, Pose2(1.,0.,0.)); // expected.insert(2, Pose2(2.,0.,0.)); diff --git a/slam/tests/testPose2Config.cpp b/slam/tests/testPose2Values.cpp similarity index 82% rename from slam/tests/testPose2Config.cpp rename to slam/tests/testPose2Values.cpp index ab7994bf8..d64861492 100644 --- a/slam/tests/testPose2Config.cpp +++ b/slam/tests/testPose2Values.cpp @@ -1,5 +1,5 @@ /** - * @file testPose2Config.cpp + * @file testPose2Values.cpp * @authors Frank Dellaert **/ @@ -13,38 +13,38 @@ using namespace gtsam; using namespace gtsam::pose2SLAM; /* ************************************************************************* */ -TEST( Pose2Config, pose2Circle ) +TEST( Pose2Values, pose2Circle ) { // expected is 4 poses tangent to circle with radius 1m - Pose2Config expected; + Pose2Values expected; expected.insert(0, Pose2( 1, 0, M_PI_2)); expected.insert(1, Pose2( 0, 1, - M_PI )); expected.insert(2, Pose2(-1, 0, - M_PI_2)); expected.insert(3, Pose2( 0, -1, 0 )); - Pose2Config actual = pose2SLAM::circle(4,1.0); + Pose2Values actual = pose2SLAM::circle(4,1.0); CHECK(assert_equal(expected,actual)); } /* ************************************************************************* */ -TEST( Pose2Config, expmap ) +TEST( Pose2Values, expmap ) { // expected is circle shifted to right - Pose2Config expected; + Pose2Values expected; expected.insert(0, Pose2( 1.1, 0, M_PI_2)); expected.insert(1, Pose2( 0.1, 1, - M_PI )); expected.insert(2, Pose2(-0.9, 0, - M_PI_2)); expected.insert(3, Pose2( 0.1, -1, 0 )); // Note expmap coordinates are in local coordinates, so shifting to right requires thought !!! 
- Pose2Config circle(pose2SLAM::circle(4,1.0)); + Pose2Values circle(pose2SLAM::circle(4,1.0)); Ordering ordering(*circle.orderingArbitrary()); - VectorConfig delta(circle.dims(ordering)); + VectorValues delta(circle.dims(ordering)); delta[ordering[Key(0)]] = Vector_(3, 0.0,-0.1,0.0); delta[ordering[Key(1)]] = Vector_(3, -0.1,0.0,0.0); delta[ordering[Key(2)]] = Vector_(3, 0.0,0.1,0.0); delta[ordering[Key(3)]] = Vector_(3, 0.1,0.0,0.0); - Pose2Config actual = circle.expmap(delta, ordering); + Pose2Values actual = circle.expmap(delta, ordering); CHECK(assert_equal(expected,actual)); } diff --git a/slam/tests/testPose3Factor.cpp b/slam/tests/testPose3Factor.cpp index c95ce295d..bd0b91581 100644 --- a/slam/tests/testPose3Factor.cpp +++ b/slam/tests/testPose3Factor.cpp @@ -27,7 +27,7 @@ TEST( Pose3Factor, error ) Pose3Factor factor(1,2, z, I6); // Create config - Pose3Config x; + Pose3Values x; x.insert(1,t1); x.insert(2,t2); diff --git a/slam/tests/testPose3SLAM.cpp b/slam/tests/testPose3SLAM.cpp index 2a3ef5f42..3d2e6e5ae 100644 --- a/slam/tests/testPose3SLAM.cpp +++ b/slam/tests/testPose3SLAM.cpp @@ -31,7 +31,7 @@ TEST(Pose3Graph, optimizeCircle) { // Create a hexagon of poses double radius = 10; - Pose3Config hexagon = pose3SLAM::circle(6,radius); + Pose3Values hexagon = pose3SLAM::circle(6,radius); Pose3 gT0 = hexagon[0], gT1 = hexagon[1]; // create a Pose graph with one equality constraint and one measurement @@ -48,7 +48,7 @@ TEST(Pose3Graph, optimizeCircle) { fg->addConstraint(5,0, _0T1, covariance); // Create initial config - boost::shared_ptr initial(new Pose3Config()); + boost::shared_ptr initial(new Pose3Values()); initial->insert(0, gT0); initial->insert(1, hexagon[1].expmap(Vector_(6,-0.1, 0.1,-0.1,-0.1, 0.1,-0.1))); initial->insert(2, hexagon[2].expmap(Vector_(6, 0.1,-0.1, 0.1, 0.1,-0.1, 0.1))); @@ -59,14 +59,14 @@ TEST(Pose3Graph, optimizeCircle) { // Choose an ordering and optimize shared_ptr ordering(new Ordering); *ordering += 
"x0","x1","x2","x3","x4","x5"; - typedef NonlinearOptimizer Optimizer; + typedef NonlinearOptimizer Optimizer; Optimizer::shared_solver solver(new Optimizer::solver(ordering)); Optimizer optimizer0(fg, initial, solver); Optimizer::verbosityLevel verbosity = Optimizer::SILENT; // Optimizer::verbosityLevel verbosity = Optimizer::ERROR; Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); - Pose3Config actual = *optimizer.config(); + Pose3Values actual = *optimizer.config(); // Check with ground truth CHECK(assert_equal(hexagon, actual,1e-4)); diff --git a/slam/tests/testPose3Config.cpp b/slam/tests/testPose3Values.cpp similarity index 87% rename from slam/tests/testPose3Config.cpp rename to slam/tests/testPose3Values.cpp index 71499fcbb..a5efa4cea 100644 --- a/slam/tests/testPose3Config.cpp +++ b/slam/tests/testPose3Values.cpp @@ -1,5 +1,5 @@ /** - * @file testPose3Config.cpp + * @file testPose3Values.cpp * @authors Frank Dellaert **/ @@ -19,23 +19,23 @@ Rot3 R3(Point3( 0,-1, 0), Point3(-1, 0, 0), Point3(0, 0, -1)); Rot3 R4(Point3( 1, 0, 0), Point3( 0,-1, 0), Point3(0, 0, -1)); /* ************************************************************************* */ -TEST( Pose3Config, pose3Circle ) +TEST( Pose3Values, pose3Circle ) { // expected is 4 poses tangent to circle with radius 1m - Pose3Config expected; + Pose3Values expected; expected.insert(0, Pose3(R1, Point3( 1, 0, 0))); expected.insert(1, Pose3(R2, Point3( 0, 1, 0))); expected.insert(2, Pose3(R3, Point3(-1, 0, 0))); expected.insert(3, Pose3(R4, Point3( 0,-1, 0))); - Pose3Config actual = pose3SLAM::circle(4,1.0); + Pose3Values actual = pose3SLAM::circle(4,1.0); CHECK(assert_equal(expected,actual)); } /* ************************************************************************* */ -TEST( Pose3Config, expmap ) +TEST( Pose3Values, expmap ) { - Pose3Config expected; + Pose3Values expected; #ifdef CORRECT_POSE3_EXMAP expected.insert(0, Pose3(R1, Point3( 1.0, 0.1, 0))); expected.insert(1, 
Pose3(R2, Point3(-0.1, 1.0, 0))); @@ -53,12 +53,12 @@ TEST( Pose3Config, expmap ) // so shifting to East requires little thought, different from with Pose2 !!! Ordering ordering(*expected.orderingArbitrary()); - VectorConfig delta(expected.dims(ordering), Vector_(24, + VectorValues delta(expected.dims(ordering), Vector_(24, 0.0,0.0,0.0, 0.1, 0.0, 0.0, 0.0,0.0,0.0, 0.1, 0.0, 0.0, 0.0,0.0,0.0, 0.1, 0.0, 0.0, 0.0,0.0,0.0, 0.1, 0.0, 0.0)); - Pose3Config actual = pose3SLAM::circle(4,1.0).expmap(delta, ordering); + Pose3Values actual = pose3SLAM::circle(4,1.0).expmap(delta, ordering); CHECK(assert_equal(expected,actual)); } diff --git a/slam/tests/testSimulated2D.cpp b/slam/tests/testSimulated2D.cpp index d9b7d7817..1c6289b5b 100644 --- a/slam/tests/testSimulated2D.cpp +++ b/slam/tests/testSimulated2D.cpp @@ -10,16 +10,16 @@ #include #include -#include +#include using namespace gtsam; using namespace std; using namespace simulated2D; /* ************************************************************************* */ -TEST( simulated2D, Simulated2DConfig ) +TEST( simulated2D, Simulated2DValues ) { - Simulated2DConfig actual; + Simulated2DValues actual; actual.insertPose(1,Point2(1,1)); actual.insertPoint(2,Point2(2,2)); CHECK(assert_equal(actual,actual,1e-9)); diff --git a/slam/tests/testSimulated2DOriented.cpp b/slam/tests/testSimulated2DOriented.cpp index a599e069f..fa351c844 100644 --- a/slam/tests/testSimulated2DOriented.cpp +++ b/slam/tests/testSimulated2DOriented.cpp @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include @@ -19,9 +19,9 @@ using namespace std; using namespace simulated2DOriented; /* ************************************************************************* */ -TEST( simulated2DOriented, Simulated2DConfig ) +TEST( simulated2DOriented, Simulated2DValues ) { - Simulated2DConfig actual; + Simulated2DValues actual; actual.insertPose(1,Point2(1,1)); actual.insertPoint(2,Point2(2,2)); CHECK(assert_equal(actual,actual,1e-9)); @@ -56,7 +56,7 @@ 
TEST( simulated2DOriented, constructor ) SharedDiagonal model(Vector_(3, 1., 1., 1.)); Simulated2DOrientedOdometry factor(measurement, model, PoseKey(1), PoseKey(2)); - Config config; + Values config; config.insert(PoseKey(1), Pose2(1., 0., 0.2)); config.insert(PoseKey(2), Pose2(2., 0., 0.1)); boost::shared_ptr lf = factor.linearize(config, *config.orderingArbitrary()); diff --git a/slam/tests/testSimulated3D.cpp b/slam/tests/testSimulated3D.cpp index 16c642ac4..df1f6638e 100644 --- a/slam/tests/testSimulated3D.cpp +++ b/slam/tests/testSimulated3D.cpp @@ -15,9 +15,9 @@ using namespace gtsam; using namespace simulated3D; /* ************************************************************************* */ -TEST( simulated3D, Config ) +TEST( simulated3D, Values ) { - Config actual; + Values actual; actual.insert(PointKey(1),Point3(1,1,1)); actual.insert(PoseKey(2),Point3(2,2,2)); EXPECT(assert_equal(actual,actual,1e-9)); diff --git a/slam/tests/testVSLAMFactor.cpp b/slam/tests/testVSLAMFactor.cpp index be4bd79af..08622a4f3 100644 --- a/slam/tests/testVSLAMFactor.cpp +++ b/slam/tests/testVSLAMFactor.cpp @@ -38,8 +38,8 @@ TEST( ProjectionFactor, error ) boost::shared_ptr factor(new ProjectionFactor(z, sigma, cameraFrameNumber, landmarkNumber, sK)); - // For the following configuration, the factor predicts 320,240 - Config config; + // For the following values structure, the factor predicts 320,240 + Values config; Rot3 R;Point3 t1(0,0,-6); Pose3 x1(R,t1); config.insert(1, x1); Point3 l1; config.insert(1, l1); // Point should project to Point2(320.,240.) 
@@ -67,13 +67,13 @@ TEST( ProjectionFactor, error ) CHECK(assert_equal(expected_lfg,*actual_lfg)); // expmap on a config - Config expected_config; + Values expected_config; Point3 t2(1,1,-5); Pose3 x2(R,t2); expected_config.insert(1, x2); Point3 l2(1,2,3); expected_config.insert(1, l2); - VectorConfig delta(expected_config.dims(ordering)); + VectorValues delta(expected_config.dims(ordering)); delta[ordering["x1"]] = Vector_(6, 0.,0.,0., 1.,1.,1.); delta[ordering["l1"]] = Vector_(3, 1.,2.,3.); - Config actual_config = config.expmap(delta, ordering); + Values actual_config = config.expmap(delta, ordering); CHECK(assert_equal(expected_config,actual_config,1e-9)); } diff --git a/slam/tests/testVSLAMGraph.cpp b/slam/tests/testVSLAMGraph.cpp index cecd00f13..5b34837f4 100644 --- a/slam/tests/testVSLAMGraph.cpp +++ b/slam/tests/testVSLAMGraph.cpp @@ -22,7 +22,7 @@ using namespace std; using namespace gtsam; using namespace gtsam::visualSLAM; using namespace boost; -typedef NonlinearOptimizer Optimizer; +typedef NonlinearOptimizer Optimizer; static SharedGaussian sigma(noiseModel::Unit::Create(1)); /* ************************************************************************* */ @@ -79,8 +79,8 @@ TEST( Graph, optimizeLM) graph->addPointConstraint(2, landmark2); graph->addPointConstraint(3, landmark3); - // Create an initial configuration corresponding to the ground truth - boost::shared_ptr initialEstimate(new Config); + // Create an initial values structure corresponding to the ground truth + boost::shared_ptr initialEstimate(new Values); initialEstimate->insert(1, camera1); initialEstimate->insert(2, camera2); initialEstimate->insert(1, landmark1); @@ -117,8 +117,8 @@ TEST( Graph, optimizeLM2) graph->addPoseConstraint(1, camera1); graph->addPoseConstraint(2, camera2); - // Create an initial configuration corresponding to the ground truth - boost::shared_ptr initialEstimate(new Config); + // Create an initial values structure corresponding to the ground truth + 
boost::shared_ptr initialEstimate(new Values); initialEstimate->insert(1, camera1); initialEstimate->insert(2, camera2); initialEstimate->insert(1, landmark1); @@ -155,8 +155,8 @@ TEST( Graph, CHECK_ORDERING) graph->addPoseConstraint(1, camera1); graph->addPoseConstraint(2, camera2); - // Create an initial configuration corresponding to the ground truth - boost::shared_ptr initialEstimate(new Config); + // Create an initial values structure corresponding to the ground truth + boost::shared_ptr initialEstimate(new Values); initialEstimate->insert(1, camera1); initialEstimate->insert(2, camera2); initialEstimate->insert(1, landmark1); diff --git a/slam/tests/testVSLAMConfig.cpp b/slam/tests/testVSLAMValues.cpp similarity index 75% rename from slam/tests/testVSLAMConfig.cpp rename to slam/tests/testVSLAMValues.cpp index db56b3cf5..b8641f7f3 100644 --- a/slam/tests/testVSLAMConfig.cpp +++ b/slam/tests/testVSLAMValues.cpp @@ -1,6 +1,6 @@ /* - * @file testConfig.cpp - * @brief Tests for the Visual SLAM configuration class + * @file testValues.cpp + * @brief Tests for the Visual SLAM values structure class * @author Alex Cunningham */ @@ -8,7 +8,7 @@ #define GTSAM_MAGIC_KEY -#include +#include #include using namespace std; @@ -16,26 +16,26 @@ using namespace gtsam; using namespace gtsam::visualSLAM; /* ************************************************************************* */ -TEST( Config, update_with_large_delta) { +TEST( Values, update_with_large_delta) { // this test ensures that if the update for delta is larger than // the size of the config, it only updates existing variables - Config init; + Values init; init.insert(1, Pose3()); init.insert(1, Point3(1.0, 2.0, 3.0)); - Config expected; + Values expected; expected.insert(1, Pose3(Rot3(), Point3(0.1, 0.1, 0.1))); expected.insert(1, Point3(1.1, 2.1, 3.1)); Ordering largeOrdering; - Config largeConfig = init; - largeConfig.insert(2, Pose3()); + Values largeValues = init; + largeValues.insert(2, Pose3()); 
largeOrdering += "x1","l1","x2"; - VectorConfig delta(largeConfig.dims(largeOrdering)); + VectorValues delta(largeValues.dims(largeOrdering)); delta[largeOrdering["x1"]] = Vector_(6, 0.0, 0.0, 0.0, 0.1, 0.1, 0.1); delta[largeOrdering["l1"]] = Vector_(3, 0.1, 0.1, 0.1); delta[largeOrdering["x2"]] = Vector_(6, 0.0, 0.0, 0.0, 100.1, 4.1, 9.1); - Config actual = init.expmap(delta, largeOrdering); + Values actual = init.expmap(delta, largeOrdering); CHECK(assert_equal(expected,actual)); } diff --git a/slam/visualSLAM.cpp b/slam/visualSLAM.cpp index 14004ca5f..e0e93feba 100644 --- a/slam/visualSLAM.cpp +++ b/slam/visualSLAM.cpp @@ -11,8 +11,8 @@ #include namespace gtsam { - INSTANTIATE_TUPLE_CONFIG2(visualSLAM::PoseConfig, visualSLAM::PointConfig) - INSTANTIATE_NONLINEAR_FACTOR_GRAPH(visualSLAM::Config) - INSTANTIATE_NONLINEAR_OPTIMIZER(visualSLAM::Graph, visualSLAM::Config) + INSTANTIATE_TUPLE_CONFIG2(visualSLAM::PoseValues, visualSLAM::PointValues) + INSTANTIATE_NONLINEAR_FACTOR_GRAPH(visualSLAM::Values) + INSTANTIATE_NONLINEAR_OPTIMIZER(visualSLAM::Graph, visualSLAM::Values) } diff --git a/slam/visualSLAM.h b/slam/visualSLAM.h index fdbd170da..f6652ea1a 100644 --- a/slam/visualSLAM.h +++ b/slam/visualSLAM.h @@ -24,20 +24,20 @@ namespace gtsam { namespace visualSLAM { */ typedef TypedSymbol PoseKey; typedef TypedSymbol PointKey; - typedef LieValues PoseConfig; - typedef LieValues PointConfig; - typedef TupleValues2 Config; - typedef boost::shared_ptr shared_config; + typedef LieValues PoseValues; + typedef LieValues PointValues; + typedef TupleValues2 Values; + typedef boost::shared_ptr shared_values; - typedef NonlinearEquality PoseConstraint; - typedef NonlinearEquality PointConstraint; + typedef NonlinearEquality PoseConstraint; + typedef NonlinearEquality PointConstraint; /** * Non-linear factor for a constraint derived from a 2D measurement, * i.e. the main building block for visual SLAM. 
*/ - template + template class GenericProjectionFactor : public NonlinearFactor2, Testable > { protected: @@ -115,12 +115,12 @@ namespace gtsam { namespace visualSLAM { }; // Typedef for general use - typedef GenericProjectionFactor ProjectionFactor; + typedef GenericProjectionFactor ProjectionFactor; /** * Non-linear factor graph for vanilla visual SLAM */ - class Graph: public NonlinearFactorGraph { + class Graph: public NonlinearFactorGraph { public: @@ -132,12 +132,12 @@ namespace gtsam { namespace visualSLAM { /** print out graph */ void print(const std::string& s = "") const { - NonlinearFactorGraph::print(s); + NonlinearFactorGraph::print(s); } /** equals */ bool equals(const Graph& p, double tol = 1e-9) const { - return NonlinearFactorGraph::equals(p, tol); + return NonlinearFactorGraph::equals(p, tol); } /** @@ -152,7 +152,7 @@ namespace gtsam { namespace visualSLAM { } /** - * Add a constraint on a pose (for now, *must* be satisfied in any Config) + * Add a constraint on a pose (for now, *must* be satisfied in any Values) * @param j index of camera * @param p to which pose to constrain it to */ @@ -162,7 +162,7 @@ namespace gtsam { namespace visualSLAM { } /** - * Add a constraint on a point (for now, *must* be satisfied in any Config) + * Add a constraint on a point (for now, *must* be satisfied in any Values) * @param j index of landmark * @param p to which point to constrain it to */ diff --git a/tests/testBayesNetPreconditioner.cpp b/tests/testBayesNetPreconditioner.cpp index 372abf05e..4176e98d4 100644 --- a/tests/testBayesNetPreconditioner.cpp +++ b/tests/testBayesNetPreconditioner.cpp @@ -25,7 +25,7 @@ TEST( BayesNetPreconditioner, conjugateGradients ) { // Build a planar graph GaussianFactorGraph Ab; - VectorConfig xtrue; + VectorValues xtrue; size_t N = 3; boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b @@ -36,22 +36,22 @@ TEST( BayesNetPreconditioner, conjugateGradients ) // Eliminate the spanning tree to build a prior Ordering ordering = 
planarOrdering(N); GaussianBayesNet Rc1 = Ab1.eliminate(ordering); // R1*x-c1 - VectorConfig xbar = optimize(Rc1); // xbar = inv(R1)*c1 + VectorValues xbar = optimize(Rc1); // xbar = inv(R1)*c1 // Create BayesNet-preconditioned system BayesNetPreconditioner system(Ab,Rc1); // Create zero config y0 and perturbed config y1 - VectorConfig y0; + VectorValues y0; Vector z2 = zero(2); BOOST_FOREACH(const Symbol& j, ordering) y0.insert(j,z2); - VectorConfig y1 = y0; + VectorValues y1 = y0; y1["x2003"] = Vector_(2, 1.0, -1.0); - VectorConfig x1 = system.x(y1); + VectorValues x1 = system.x(y1); // Check gradient for y0 - VectorConfig expectedGradient0; + VectorValues expectedGradient0; expectedGradient0.insert("x1001", Vector_(2,-1000.,-1000.)); expectedGradient0.insert("x1002", Vector_(2, 0., -300.)); expectedGradient0.insert("x1003", Vector_(2, 0., -300.)); @@ -61,7 +61,7 @@ TEST( BayesNetPreconditioner, conjugateGradients ) expectedGradient0.insert("x3001", Vector_(2, -100., 100.)); expectedGradient0.insert("x3002", Vector_(2, -100., 0.)); expectedGradient0.insert("x3003", Vector_(2, -100., -100.)); - VectorConfig actualGradient0 = system.gradient(y0); + VectorValues actualGradient0 = system.gradient(y0); CHECK(assert_equal(expectedGradient0,actualGradient0)); #ifdef VECTORBTREE CHECK(actualGradient0.cloned(y0)); @@ -71,13 +71,13 @@ TEST( BayesNetPreconditioner, conjugateGradients ) bool verbose = false; double epsilon = 1e-6; // had to crank this down !!! 
size_t maxIterations = 100; - VectorConfig actual_y = gtsam::conjugateGradients(system, y1, verbose, epsilon, epsilon, maxIterations); - VectorConfig actual_x = system.x(actual_y); + VectorValues actual_y = gtsam::conjugateGradients(system, y1, verbose, epsilon, epsilon, maxIterations); + VectorValues actual_x = system.x(actual_y); CHECK(assert_equal(xtrue,actual_x)); // Compare with non preconditioned version: - VectorConfig actual2 = conjugateGradientDescent(Ab, x1, verbose, epsilon, + VectorValues actual2 = conjugateGradientDescent(Ab, x1, verbose, epsilon, maxIterations); CHECK(assert_equal(xtrue,actual2)); } diff --git a/tests/testBoundingConstraint.cpp b/tests/testBoundingConstraint.cpp index 943d4652b..276bc33d2 100644 --- a/tests/testBoundingConstraint.cpp +++ b/tests/testBoundingConstraint.cpp @@ -20,10 +20,10 @@ SharedDiagonal soft_model2 = noiseModel::Unit::Create(2); SharedDiagonal soft_model2_alt = noiseModel::Isotropic::Sigma(2, 0.1); SharedDiagonal hard_model1 = noiseModel::Constrained::All(1); -typedef NonlinearFactorGraph Graph; +typedef NonlinearFactorGraph Graph; typedef boost::shared_ptr shared_graph; -typedef boost::shared_ptr shared_config; -typedef NonlinearOptimizer Optimizer; +typedef boost::shared_ptr shared_values; +typedef NonlinearOptimizer Optimizer; // some simple inequality constraints simulated2D::PoseKey key(1); @@ -39,7 +39,7 @@ iq2D::PoseYInequality constraint4(key, 2.0, false, mu); /* ************************************************************************* */ TEST( testBoundingConstraint, unary_basics_inactive1 ) { Point2 pt1(2.0, 3.0); - simulated2D::Config config; + simulated2D::Values config; config.insert(key, pt1); EXPECT(!constraint1.active(config)); EXPECT(!constraint2.active(config)); @@ -58,7 +58,7 @@ TEST( testBoundingConstraint, unary_basics_inactive1 ) { /* ************************************************************************* */ TEST( testBoundingConstraint, unary_basics_inactive2 ) { Point2 pt2(-2.0, -3.0); - 
simulated2D::Config config; + simulated2D::Values config; config.insert(key, pt2); EXPECT(!constraint3.active(config)); EXPECT(!constraint4.active(config)); @@ -77,7 +77,7 @@ TEST( testBoundingConstraint, unary_basics_inactive2 ) { /* ************************************************************************* */ TEST( testBoundingConstraint, unary_basics_active1 ) { Point2 pt2(-2.0, -3.0); - simulated2D::Config config; + simulated2D::Values config; config.insert(key, pt2); EXPECT(constraint1.active(config)); EXPECT(constraint2.active(config)); @@ -92,7 +92,7 @@ TEST( testBoundingConstraint, unary_basics_active1 ) { /* ************************************************************************* */ TEST( testBoundingConstraint, unary_basics_active2 ) { Point2 pt1(2.0, 3.0); - simulated2D::Config config; + simulated2D::Values config; config.insert(key, pt1); EXPECT(constraint3.active(config)); EXPECT(constraint4.active(config)); @@ -107,7 +107,7 @@ TEST( testBoundingConstraint, unary_basics_active2 ) { /* ************************************************************************* */ TEST( testBoundingConstraint, unary_linearization_inactive) { Point2 pt1(2.0, 3.0); - simulated2D::Config config1; + simulated2D::Values config1; config1.insert(key, pt1); GaussianFactor::shared_ptr actual1 = constraint1.linearize(config1); GaussianFactor::shared_ptr actual2 = constraint2.linearize(config1); @@ -118,7 +118,7 @@ TEST( testBoundingConstraint, unary_linearization_inactive) { /* ************************************************************************* */ TEST( testBoundingConstraint, unary_linearization_active) { Point2 pt2(-2.0, -3.0); - simulated2D::Config config2; + simulated2D::Values config2; config2.insert(key, pt2); GaussianFactor::shared_ptr actual1 = constraint1.linearize(config2); GaussianFactor::shared_ptr actual2 = constraint2.linearize(config2); @@ -141,11 +141,11 @@ TEST( testBoundingConstraint, unary_simple_optimization1) { graph->add(iq2D::PoseYInequality(x1, 2.0, 
true)); graph->add(simulated2D::Prior(start_pt, soft_model2, x1)); - shared_config initConfig(new simulated2D::Config()); - initConfig->insert(x1, start_pt); + shared_values initValues(new simulated2D::Values()); + initValues->insert(x1, start_pt); - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initConfig, Optimizer::SILENT); - simulated2D::Config expected; + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues, Optimizer::SILENT); + simulated2D::Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, *actual, tol)); } @@ -163,11 +163,11 @@ TEST( testBoundingConstraint, unary_simple_optimization2) { graph->add(iq2D::PoseYInequality(x1, 2.0, false)); graph->add(simulated2D::Prior(start_pt, soft_model2, x1)); - shared_config initConfig(new simulated2D::Config()); - initConfig->insert(x1, start_pt); + shared_values initValues(new simulated2D::Values()); + initValues->insert(x1, start_pt); - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initConfig, Optimizer::SILENT); - simulated2D::Config expected; + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues, Optimizer::SILENT); + simulated2D::Values expected; expected.insert(x1, goal_pt); CHECK(assert_equal(expected, *actual, tol)); } @@ -186,7 +186,7 @@ TEST( testBoundingConstraint, MaxDistance_basics) { EXPECT(assert_equal(zero(1), rangeBound.evaluateError(pt1, pt3))); EXPECT(assert_equal(-1.0*ones(1), rangeBound.evaluateError(pt1, pt4))); - simulated2D::Config config1; + simulated2D::Values config1; config1.insert(key1, pt1); config1.insert(key2, pt1); EXPECT(!rangeBound.active(config1)); @@ -222,13 +222,13 @@ TEST( testBoundingConstraint, MaxDistance_simple_optimization) { graph.add(simulated2D::Prior(pt2_init, soft_model2_alt, x2)); graph.add(iq2D::PoseMaxDistConstraint(x1, x2, 2.0)); - simulated2D::Config initial_state; + simulated2D::Values initial_state; initial_state.insert(x1, pt1); initial_state.insert(x2, pt2_init); 
- Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initial_state); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initial_state); - simulated2D::Config expected; + simulated2D::Values expected; expected.insert(x1, pt1); expected.insert(x2, pt2_goal); @@ -252,7 +252,7 @@ TEST( testBoundingConstraint, avoid_demo) { graph.add(simulated2D::Odometry(odo, soft_model2_alt, x2, x3)); graph.add(simulated2D::equality_constraints::UnaryEqualityConstraint(x3_pt, x3)); - simulated2D::Config init, expected; + simulated2D::Values init, expected; init.insert(x1, x1_pt); init.insert(x3, x3_pt); init.insert(l1, l1_pt); @@ -260,7 +260,7 @@ TEST( testBoundingConstraint, avoid_demo) { init.insert(x2, x2_init); expected.insert(x2, x2_goal); - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, init); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, init); EXPECT(assert_equal(expected, *actual, tol)); } diff --git a/tests/testGaussianBayesNet.cpp b/tests/testGaussianBayesNet.cpp index ab078e75d..c0811e930 100644 --- a/tests/testGaussianBayesNet.cpp +++ b/tests/testGaussianBayesNet.cpp @@ -78,9 +78,9 @@ TEST( GaussianBayesNet, matrix ) TEST( GaussianBayesNet, optimize ) { GaussianBayesNet cbn = createSmallGaussianBayesNet(); - VectorConfig actual = optimize(cbn); + VectorValues actual = optimize(cbn); - VectorConfig expected(vector(2,1)); + VectorValues expected(vector(2,1)); expected[_x_] = Vector_(1,4.); expected[_y_] = Vector_(1,5.); @@ -104,9 +104,9 @@ TEST( GaussianBayesNet, optimize2 ) fg.add(_x_, -eye(1), _z_, eye(1), 2*ones(1), noise); GaussianBayesNet cbn = *Inference::Eliminate(fg); - VectorConfig actual = optimize(cbn); + VectorValues actual = optimize(cbn); - VectorConfig expected(vector(3,1)); + VectorValues expected(vector(3,1)); expected[_x_] = Vector_(1,1.); expected[_y_] = Vector_(1,2.); expected[_z_] = Vector_(1,3.); @@ -122,14 +122,14 @@ TEST( GaussianBayesNet, backSubstitute ) // 3 1 3 GaussianBayesNet cbn = 
createSmallGaussianBayesNet(); - VectorConfig y(vector(2,1)), x(vector(2,1)); + VectorValues y(vector(2,1)), x(vector(2,1)); y[_x_] = Vector_(1,2.); y[_y_] = Vector_(1,3.); x[_x_] = Vector_(1,-1.); x[_y_] = Vector_(1, 3.); // test functional version - VectorConfig actual = backSubstitute(cbn,y); + VectorValues actual = backSubstitute(cbn,y); CHECK(assert_equal(x,actual)); // test imperative version @@ -144,9 +144,9 @@ TEST( GaussianBayesNet, rhs ) // 2 = 1 1 -1 // 3 1 3 GaussianBayesNet cbn = createSmallGaussianBayesNet(); - VectorConfig expected = gtsam::optimize(cbn); - VectorConfig d = rhs(cbn); - VectorConfig actual = backSubstitute(cbn, d); + VectorValues expected = gtsam::optimize(cbn); + VectorValues d = rhs(cbn); + VectorValues actual = backSubstitute(cbn, d); CHECK(assert_equal(expected, actual)); } @@ -168,9 +168,9 @@ TEST( GaussianBayesNet, rhs_with_sigmas ) cbn.push_back(Px_y); cbn.push_back(Py); - VectorConfig expected = gtsam::optimize(cbn); - VectorConfig d = rhs(cbn); - VectorConfig actual = backSubstitute(cbn, d); + VectorValues expected = gtsam::optimize(cbn); + VectorValues d = rhs(cbn); + VectorValues actual = backSubstitute(cbn, d); CHECK(assert_equal(expected, actual)); } @@ -182,14 +182,14 @@ TEST( GaussianBayesNet, backSubstituteTranspose ) // 5 1 1 3 GaussianBayesNet cbn = createSmallGaussianBayesNet(); - VectorConfig y(vector(2,1)), x(vector(2,1)); + VectorValues y(vector(2,1)), x(vector(2,1)); x[_x_] = Vector_(1,2.); x[_y_] = Vector_(1,5.); y[_x_] = Vector_(1,2.); y[_y_] = Vector_(1,3.); // test functional version - VectorConfig actual = backSubstituteTranspose(cbn,x); + VectorValues actual = backSubstituteTranspose(cbn,x); CHECK(assert_equal(y,actual)); } diff --git a/tests/testGaussianFactor.cpp b/tests/testGaussianFactor.cpp index ac63c6014..abfe09d75 100644 --- a/tests/testGaussianFactor.cpp +++ b/tests/testGaussianFactor.cpp @@ -161,7 +161,7 @@ TEST( GaussianFactor, error ) GaussianFactor::shared_ptr lf = fg[0]; // check the error of 
the first factor with noisy config - VectorConfig cfg = createZeroDelta(ordering); + VectorValues cfg = createZeroDelta(ordering); // calculate the error from the factor "f1" // note the error is the same as in testNonlinearFactor diff --git a/tests/testGaussianFactorGraph.cpp b/tests/testGaussianFactorGraph.cpp index 51b747d12..eafd29003 100644 --- a/tests/testGaussianFactorGraph.cpp +++ b/tests/testGaussianFactorGraph.cpp @@ -49,7 +49,7 @@ TEST( GaussianFactorGraph, error ) { Ordering ordering; ordering += "x1","x2","l1"; GaussianFactorGraph fg = createGaussianFactorGraph(ordering); - VectorConfig cfg = createZeroDelta(ordering); + VectorValues cfg = createZeroDelta(ordering); // note the error is the same as in testNonlinearFactorGraph as a // zero delta config in the linear graph is equivalent to noisy in @@ -476,10 +476,10 @@ TEST( GaussianFactorGraph, optimize ) GaussianFactorGraph fg = createGaussianFactorGraph(ord); // optimize the graph - VectorConfig actual = optimize(*Inference::Eliminate(fg)); + VectorValues actual = optimize(*Inference::Eliminate(fg)); // verify - VectorConfig expected = createCorrectDelta(ord); + VectorValues expected = createCorrectDelta(ord); CHECK(assert_equal(expected,actual)); } @@ -494,10 +494,10 @@ TEST( GaussianFactorGraph, optimize ) // GaussianFactorGraph fg = createGaussianFactorGraph(ord); // // // optimize the graph -// VectorConfig actual = fg.optimizeMultiFrontals(ord); +// VectorValues actual = fg.optimizeMultiFrontals(ord); // // // verify -// VectorConfig expected = createCorrectDelta(); +// VectorValues expected = createCorrectDelta(); // // CHECK(assert_equal(expected,actual)); //} @@ -635,7 +635,7 @@ TEST(GaussianFactorGraph, createSmoother) //} /* ************************************************************************* */ -double error(const VectorConfig& x) { +double error(const VectorValues& x) { // create an ordering Ordering ord; ord += "x2","l1","x1"; @@ -649,7 +649,7 @@ double error(const VectorConfig& x) 
{ // GaussianFactorGraph fg = createGaussianFactorGraph(); // // // Construct expected gradient -// VectorConfig expected; +// VectorValues expected; // // // 2*f(x) = 100*(x1+c["x1"])^2 + 100*(x2-x1-[0.2;-0.1])^2 + 25*(l1-x1-[0.0;0.2])^2 + 25*(l1-x2-[-0.2;0.3])^2 // // worked out: df/dx1 = 100*[0.1;0.1] + 100*[0.2;-0.1]) + 25*[0.0;0.2] = [10+20;10-10+5] = [30;5] @@ -658,20 +658,20 @@ double error(const VectorConfig& x) { // expected.insert("x2",Vector_(2,-25.0, 17.5)); // // // Check the gradient at delta=0 -// VectorConfig zero = createZeroDelta(); -// VectorConfig actual = fg.gradient(zero); +// VectorValues zero = createZeroDelta(); +// VectorValues actual = fg.gradient(zero); // CHECK(assert_equal(expected,actual)); // // // Check it numerically for good measure -// Vector numerical_g = numericalGradient(error,zero,0.001); +// Vector numerical_g = numericalGradient(error,zero,0.001); // CHECK(assert_equal(Vector_(6,5.0,-12.5,30.0,5.0,-25.0,17.5),numerical_g)); // // // Check the gradient at the solution (should be zero) // Ordering ord; // ord += "x2","l1","x1"; // GaussianFactorGraph fg2 = createGaussianFactorGraph(); -// VectorConfig solution = fg2.optimize(ord); // destructive -// VectorConfig actual2 = fg.gradient(solution); +// VectorValues solution = fg2.optimize(ord); // destructive +// VectorValues actual2 = fg.gradient(solution); // CHECK(assert_equal(zero,actual2)); //} @@ -682,7 +682,7 @@ TEST( GaussianFactorGraph, multiplication ) Ordering ord; ord += "x2","l1","x1"; GaussianFactorGraph A = createGaussianFactorGraph(ord); - VectorConfig x = createCorrectDelta(ord); + VectorValues x = createCorrectDelta(ord); Errors actual = A * x; Errors expected; expected += Vector_(2,-1.0,-1.0); @@ -705,7 +705,7 @@ TEST( GaussianFactorGraph, multiplication ) // e += Vector_(2, 0.0,-5.0); // e += Vector_(2,-7.5,-5.0); // -// VectorConfig expected = createZeroDelta(ord), actual = A ^ e; +// VectorValues expected = createZeroDelta(ord), actual = A ^ e; // 
expected[ord["l1"]] = Vector_(2, -37.5,-50.0); // expected[ord["x1"]] = Vector_(2,-150.0, 25.0); // expected[ord["x2"]] = Vector_(2, 187.5, 25.0); @@ -770,10 +770,10 @@ TEST( GaussianFactorGraph, constrained_simple ) GaussianFactorGraph fg = createSimpleConstraintGraph(); // eliminate and solve - VectorConfig actual = optimize(*Inference::Eliminate(fg)); + VectorValues actual = optimize(*Inference::Eliminate(fg)); // verify - VectorConfig expected = createSimpleConstraintConfig(); + VectorValues expected = createSimpleConstraintValues(); CHECK(assert_equal(expected, actual)); } @@ -784,10 +784,10 @@ TEST( GaussianFactorGraph, constrained_single ) GaussianFactorGraph fg = createSingleConstraintGraph(); // eliminate and solve - VectorConfig actual = optimize(*Inference::Eliminate(fg)); + VectorValues actual = optimize(*Inference::Eliminate(fg)); // verify - VectorConfig expected = createSingleConstraintConfig(); + VectorValues expected = createSingleConstraintValues(); CHECK(assert_equal(expected, actual)); } @@ -800,10 +800,10 @@ TEST( GaussianFactorGraph, constrained_single ) // // eliminate and solve // Ordering ord; // ord += "y", "x"; -// VectorConfig actual = fg.optimize(ord); +// VectorValues actual = fg.optimize(ord); // // // verify -// VectorConfig expected = createSingleConstraintConfig(); +// VectorValues expected = createSingleConstraintValues(); // CHECK(assert_equal(expected, actual)); //} @@ -814,10 +814,10 @@ TEST( GaussianFactorGraph, constrained_multi1 ) GaussianFactorGraph fg = createMultiConstraintGraph(); // eliminate and solve - VectorConfig actual = optimize(*Inference::Eliminate(fg)); + VectorValues actual = optimize(*Inference::Eliminate(fg)); // verify - VectorConfig expected = createMultiConstraintConfig(); + VectorValues expected = createMultiConstraintValues(); CHECK(assert_equal(expected, actual)); } @@ -830,10 +830,10 @@ TEST( GaussianFactorGraph, constrained_multi1 ) // // eliminate and solve // Ordering ord; // ord += "z", "x", "y"; 
-// VectorConfig actual = fg.optimize(ord); +// VectorValues actual = fg.optimize(ord); // // // verify -// VectorConfig expected = createMultiConstraintConfig(); +// VectorValues expected = createMultiConstraintValues(); // CHECK(assert_equal(expected, actual)); //} diff --git a/tests/testGaussianISAM.cpp b/tests/testGaussianISAM.cpp index a3cd50e25..567a18fb9 100644 --- a/tests/testGaussianISAM.cpp +++ b/tests/testGaussianISAM.cpp @@ -54,9 +54,9 @@ TEST( ISAM, iSAM_smoother ) CHECK(assert_equal(expected, actual)); // obtain solution - VectorConfig e(vector(7,2)); // expected solution + VectorValues e(vector(7,2)); // expected solution e.makeZero(); - VectorConfig optimized = optimize(actual); // actual solution + VectorValues optimized = optimize(actual); // actual solution CHECK(assert_equal(e, optimized)); } @@ -168,10 +168,10 @@ C6 x1 : x2 // // eliminate using a "nested dissection" ordering // GaussianBayesNet chordalBayesNet = smoother.eliminate(ordering); // -// VectorConfig expectedSolution; +// VectorValues expectedSolution; // BOOST_FOREACH(string key, ordering) // expectedSolution.insert(key,zero(2)); -// VectorConfig actualSolution = optimize(chordalBayesNet); +// VectorValues actualSolution = optimize(chordalBayesNet); // CHECK(assert_equal(expectedSolution,actualSolution,tol)); // // // Create the Bayes tree diff --git a/tests/testGaussianJunctionTree.cpp b/tests/testGaussianJunctionTree.cpp index 9276ec02d..d03adba29 100644 --- a/tests/testGaussianJunctionTree.cpp +++ b/tests/testGaussianJunctionTree.cpp @@ -79,10 +79,10 @@ TEST( GaussianJunctionTree, optimizeMultiFrontal ) // optimize the graph GaussianJunctionTree tree(fg); - VectorConfig actual = tree.optimize(); + VectorValues actual = tree.optimize(); // verify - VectorConfig expected(GaussianVariableIndex<>(fg).dims()); // expected solution + VectorValues expected(GaussianVariableIndex<>(fg).dims()); // expected solution Vector v = Vector_(2, 0., 0.); for (int i=1; i<=7; i++) 
expected[ordering[Symbol('x',i)]] = v; @@ -94,16 +94,16 @@ TEST( GaussianJunctionTree, optimizeMultiFrontal2) { // create a graph Graph nlfg = createNonlinearFactorGraph(); - Config noisy = createNoisyConfig(); + Values noisy = createNoisyValues(); Ordering ordering; ordering += "x1","x2","l1"; GaussianFactorGraph fg = *nlfg.linearize(noisy, ordering); // optimize the graph GaussianJunctionTree tree(fg); - VectorConfig actual = tree.optimize(); + VectorValues actual = tree.optimize(); // verify - VectorConfig expected = createCorrectDelta(ordering); // expected solution + VectorValues expected = createCorrectDelta(ordering); // expected solution CHECK(assert_equal(expected,actual)); } @@ -112,7 +112,7 @@ TEST(GaussianJunctionTree, slamlike) { typedef planarSLAM::PoseKey PoseKey; typedef planarSLAM::PointKey PointKey; - planarSLAM::Config init; + planarSLAM::Values init; planarSLAM::Graph newfactors; planarSLAM::Graph fullgraph; SharedDiagonal odoNoise = sharedSigmas(Vector_(3, 0.1, 0.1, M_PI/100.0)); @@ -162,12 +162,12 @@ TEST(GaussianJunctionTree, slamlike) { GaussianFactorGraph linearized = *fullgraph.linearize(init, ordering); GaussianJunctionTree gjt(linearized); - VectorConfig deltaactual = gjt.optimize(); - planarSLAM::Config actual = init.expmap(deltaactual, ordering); + VectorValues deltaactual = gjt.optimize(); + planarSLAM::Values actual = init.expmap(deltaactual, ordering); GaussianBayesNet gbn = *Inference::Eliminate(linearized); - VectorConfig delta = optimize(gbn); - planarSLAM::Config expected = init.expmap(delta, ordering); + VectorValues delta = optimize(gbn); + planarSLAM::Values expected = init.expmap(delta, ordering); CHECK(assert_equal(expected, actual)); diff --git a/tests/testGraph.cpp b/tests/testGraph.cpp index 501a7e901..dd5963bf5 100644 --- a/tests/testGraph.cpp +++ b/tests/testGraph.cpp @@ -78,7 +78,7 @@ TEST( Graph, composePoses ) graph.addConstraint(2,3, p23, cov); graph.addConstraint(4,3, p43, cov); - PredecessorMap tree; + 
PredecessorMap tree; tree.insert(1,2); tree.insert(2,2); tree.insert(3,2); @@ -86,10 +86,10 @@ TEST( Graph, composePoses ) Pose2 rootPose = p2; - boost::shared_ptr actual = composePoses (graph, tree, rootPose); + boost::shared_ptr actual = composePoses (graph, tree, rootPose); - Pose2Config expected; + Pose2Values expected; expected.insert(1, p1); expected.insert(2, p2); expected.insert(3, p3); diff --git a/tests/testIterative.cpp b/tests/testIterative.cpp index 4015c684d..94362870a 100644 --- a/tests/testIterative.cpp +++ b/tests/testIterative.cpp @@ -14,7 +14,7 @@ using namespace boost::assign; #define GTSAM_MAGIC_KEY #include -#include +#include #include #include #include @@ -35,12 +35,12 @@ TEST( Iterative, steepestDescent ) Ordering ord; ord += "l1", "x1", "x2"; GaussianFactorGraph fg = createGaussianFactorGraph(); - VectorConfig expected = fg.optimize(ord); // destructive + VectorValues expected = fg.optimize(ord); // destructive // Do gradient descent GaussianFactorGraph fg2 = createGaussianFactorGraph(); - VectorConfig zero = createZeroDelta(); - VectorConfig actual = steepestDescent(fg2, zero, verbose); + VectorValues zero = createZeroDelta(); + VectorValues actual = steepestDescent(fg2, zero, verbose); CHECK(assert_equal(expected,actual,1e-2)); } @@ -51,7 +51,7 @@ TEST( Iterative, conjugateGradientDescent ) Ordering ord; ord += "l1", "x1", "x2"; GaussianFactorGraph fg = createGaussianFactorGraph(); - VectorConfig expected = fg.optimize(ord); // destructive + VectorValues expected = fg.optimize(ord); // destructive // create graph and get matrices GaussianFactorGraph fg2 = createGaussianFactorGraph(); @@ -71,21 +71,21 @@ TEST( Iterative, conjugateGradientDescent ) CHECK(assert_equal(expectedX,actualX2,1e-9)); // Do conjugate gradient descent on factor graph - VectorConfig zero = createZeroDelta(); - VectorConfig actual = conjugateGradientDescent(fg2, zero, verbose); + VectorValues zero = createZeroDelta(); + VectorValues actual = 
conjugateGradientDescent(fg2, zero, verbose); CHECK(assert_equal(expected,actual,1e-2)); // Test method - VectorConfig actual2 = fg2.conjugateGradientDescent(zero, verbose); + VectorValues actual2 = fg2.conjugateGradientDescent(zero, verbose); CHECK(assert_equal(expected,actual2,1e-2)); } /* ************************************************************************* */ /*TEST( Iterative, conjugateGradientDescent_hard_constraint ) { - typedef Pose2Config::Key Key; + typedef Pose2Values::Key Key; - Pose2Config config; + Pose2Values config; config.insert(1, Pose2(0.,0.,0.)); config.insert(2, Pose2(1.5,0.,0.)); @@ -94,14 +94,14 @@ TEST( Iterative, conjugateGradientDescent ) graph.push_back(Pose2Graph::sharedFactor(new Pose2Factor(Key(1), Key(2), Pose2(1.,0.,0.), cov))); graph.addHardConstraint(1, config[1]); - VectorConfig zeros; + VectorValues zeros; zeros.insert("x1",zero(3)); zeros.insert("x2",zero(3)); GaussianFactorGraph fg = graph.linearize(config); - VectorConfig actual = conjugateGradientDescent(fg, zeros, true, 1e-3, 1e-5, 10); + VectorValues actual = conjugateGradientDescent(fg, zeros, true, 1e-3, 1e-5, 10); - VectorConfig expected; + VectorValues expected; expected.insert("x1", zero(3)); expected.insert("x2", Vector_(-0.5,0.,0.)); CHECK(assert_equal(expected, actual)); @@ -110,7 +110,7 @@ TEST( Iterative, conjugateGradientDescent ) /* ************************************************************************* */ TEST( Iterative, conjugateGradientDescent_soft_constraint ) { - Pose2Config config; + Pose2Values config; config.insert(1, Pose2(0.,0.,0.)); config.insert(2, Pose2(1.5,0.,0.)); @@ -118,14 +118,14 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint ) graph.addPrior(1, Pose2(0.,0.,0.), noiseModel::Isotropic::Sigma(3, 1e-10)); graph.addConstraint(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); - VectorConfig zeros; + VectorValues zeros; zeros.insert("x1",zero(3)); zeros.insert("x2",zero(3)); boost::shared_ptr fg = 
graph.linearize(config); - VectorConfig actual = conjugateGradientDescent(*fg, zeros, verbose, 1e-3, 1e-5, 100); + VectorValues actual = conjugateGradientDescent(*fg, zeros, verbose, 1e-3, 1e-5, 100); - VectorConfig expected; + VectorValues expected; expected.insert("x1", zero(3)); expected.insert("x2", Vector_(3,-0.5,0.,0.)); CHECK(assert_equal(expected, actual)); @@ -134,9 +134,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint ) /* ************************************************************************* */ TEST( Iterative, subgraphPCG ) { - typedef Pose2Config::Key Key; + typedef Pose2Values::Key Key; - Pose2Config theta_bar; + Pose2Values theta_bar; theta_bar.insert(1, Pose2(0.,0.,0.)); theta_bar.insert(2, Pose2(1.5,0.,0.)); @@ -161,17 +161,17 @@ TEST( Iterative, subgraphPCG ) SubgraphPreconditioner::sharedFG Ab1 = T.linearize(theta_bar); SubgraphPreconditioner::sharedFG Ab2 = C.linearize(theta_bar); SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1_->eliminate_(ordering); - SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1); + SubgraphPreconditioner::sharedValues xbar = optimize_(*Rc1); SubgraphPreconditioner system(Ab1, Ab2, Rc1, xbar); - VectorConfig zeros = VectorConfig::zero(*xbar); + VectorValues zeros = VectorValues::zero(*xbar); // Solve the subgraph PCG - VectorConfig ybar = conjugateGradients (system, zeros, verbose, 1e-5, 1e-5, 100); - VectorConfig actual = system.x(ybar); + VectorValues actual = system.x(ybar); - VectorConfig expected; + VectorValues expected; expected.insert("x1", zero(3)); expected.insert("x2", Vector_(3, -0.5, 0., 0.)); CHECK(assert_equal(expected, actual)); diff --git a/tests/testLinearApproxFactor.cpp b/tests/testLinearApproxFactor.cpp index a7b63cf58..55e7e5281 100644 --- a/tests/testLinearApproxFactor.cpp +++ b/tests/testLinearApproxFactor.cpp @@ -14,7 +14,7 @@ using namespace std; using namespace gtsam; -typedef LinearApproxFactor ApproxFactor; +typedef LinearApproxFactor ApproxFactor; /* 
************************************************************************* */ TEST ( LinearApproxFactor, basic ) { @@ -23,7 +23,7 @@ TEST ( LinearApproxFactor, basic ) { Vector b = repeat(2, 1.2); SharedDiagonal model = noiseModel::Unit::Create(2); GaussianFactor::shared_ptr lin_factor(new GaussianFactor(key1, A1, b, model)); - planarSLAM::Config lin_points; + planarSLAM::Values lin_points; ApproxFactor f1(lin_factor, lin_points); EXPECT(f1.size() == 1); @@ -31,7 +31,7 @@ TEST ( LinearApproxFactor, basic ) { expKeyVec.push_back(planarSLAM::PointKey(key1.index())); EXPECT(assert_equal(expKeyVec, f1.nonlinearKeys())); - planarSLAM::Config config; // doesn't really need to have any data + planarSLAM::Values config; // doesn't really need to have any data GaussianFactor::shared_ptr actual = f1.linearize(config); // Check the linearization diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index e64ca8468..f52461896 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -17,20 +17,20 @@ using namespace std; using namespace gtsam; typedef TypedSymbol PoseKey; -typedef LieValues PoseConfig; -typedef PriorFactor PosePrior; -typedef NonlinearEquality PoseNLE; +typedef LieValues PoseValues; +typedef PriorFactor PosePrior; +typedef NonlinearEquality PoseNLE; typedef boost::shared_ptr shared_poseNLE; -typedef NonlinearFactorGraph PoseGraph; -typedef NonlinearOptimizer PoseOptimizer; +typedef NonlinearFactorGraph PoseGraph; +typedef NonlinearOptimizer PoseOptimizer; PoseKey key(1); /* ************************************************************************* */ TEST ( NonlinearEquality, linearization ) { Pose2 value = Pose2(2.1, 1.0, 2.0); - PoseConfig linearize; + PoseValues linearize; linearize.insert(key, value); // create a nonlinear equality constraint @@ -48,7 +48,7 @@ TEST ( NonlinearEquality, linearization_pose ) { PoseKey key(1); Pose2 value; - PoseConfig config; + PoseValues config; config.insert(key, value); // 
create a nonlinear equality constraint @@ -62,7 +62,7 @@ TEST ( NonlinearEquality, linearization_pose ) { TEST ( NonlinearEquality, linearization_fail ) { Pose2 value = Pose2(2.1, 1.0, 2.0); Pose2 wrong = Pose2(2.1, 3.0, 4.0); - PoseConfig bad_linearize; + PoseValues bad_linearize; bad_linearize.insert(key, wrong); // create a nonlinear equality constraint @@ -78,7 +78,7 @@ TEST ( NonlinearEquality, linearization_fail_pose ) { PoseKey key(1); Pose2 value(2.0, 1.0, 2.0), wrong(2.0, 3.0, 4.0); - PoseConfig bad_linearize; + PoseValues bad_linearize; bad_linearize.insert(key, wrong); // create a nonlinear equality constraint @@ -94,7 +94,7 @@ TEST ( NonlinearEquality, linearization_fail_pose_origin ) { PoseKey key(1); Pose2 value, wrong(2.0, 3.0, 4.0); - PoseConfig bad_linearize; + PoseValues bad_linearize; bad_linearize.insert(key, wrong); // create a nonlinear equality constraint @@ -108,7 +108,7 @@ TEST ( NonlinearEquality, linearization_fail_pose_origin ) { TEST ( NonlinearEquality, error ) { Pose2 value = Pose2(2.1, 1.0, 2.0); Pose2 wrong = Pose2(2.1, 3.0, 4.0); - PoseConfig feasible, bad_linearize; + PoseValues feasible, bad_linearize; feasible.insert(key, value); bad_linearize.insert(key, wrong); @@ -153,7 +153,7 @@ TEST ( NonlinearEquality, allow_error_pose ) { EXPECT(assert_equal(expVec, actVec, 1e-5)); // the actual error should have a gain on it - PoseConfig config; + PoseValues config; config.insert(key1, badPoint1); double actError = nle.error(config); DOUBLES_EQUAL(500.0, actError, 1e-9); @@ -180,7 +180,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) { // initialize away from the ideal Pose2 initPose(0.0, 2.0, 3.0); - boost::shared_ptr init(new PoseConfig()); + boost::shared_ptr init(new PoseValues()); init->insert(key1, initPose); // optimize @@ -192,7 +192,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) { PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT); // verify - PoseConfig expected; + 
PoseValues expected; expected.insert(key1, feasible1); EXPECT(assert_equal(expected, *result.config())); } @@ -205,7 +205,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { Pose2 feasible1(1.0, 2.0, 3.0); // initialize away from the ideal - boost::shared_ptr init(new PoseConfig()); + boost::shared_ptr init(new PoseValues()); Pose2 initPose(0.0, 2.0, 3.0); init->insert(key1, initPose); @@ -229,7 +229,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::SILENT); // verify - PoseConfig expected; + PoseValues expected; expected.insert(key1, feasible1); EXPECT(assert_equal(expected, *result.config())); } diff --git a/tests/testNonlinearEqualityConstraint.cpp b/tests/testNonlinearEqualityConstraint.cpp index 7740eb02f..65b44d619 100644 --- a/tests/testNonlinearEqualityConstraint.cpp +++ b/tests/testNonlinearEqualityConstraint.cpp @@ -20,10 +20,10 @@ static const double tol = 1e-5; SharedDiagonal hard_model = noiseModel::Constrained::All(2); SharedDiagonal soft_model = noiseModel::Isotropic::Sigma(2, 1.0); -typedef NonlinearFactorGraph Graph; +typedef NonlinearFactorGraph Graph; typedef boost::shared_ptr shared_graph; -typedef boost::shared_ptr shared_config; -typedef NonlinearOptimizer Optimizer; +typedef boost::shared_ptr shared_values; +typedef NonlinearOptimizer Optimizer; /* ************************************************************************* */ TEST( testNonlinearEqualityConstraint, unary_basics ) { @@ -32,14 +32,14 @@ TEST( testNonlinearEqualityConstraint, unary_basics ) { double mu = 1000.0; eq2D::UnaryEqualityConstraint constraint(pt, key, mu); - simulated2D::Config config1; + simulated2D::Values config1; config1.insert(key, pt); EXPECT(constraint.active(config1)); EXPECT(assert_equal(zero(2), constraint.evaluateError(pt), tol)); EXPECT(assert_equal(zero(2), constraint.unwhitenedError(config1), tol)); EXPECT_DOUBLES_EQUAL(0.0, 
constraint.error(config1), tol); - simulated2D::Config config2; + simulated2D::Values config2; Point2 ptBad1(2.0, 2.0); config2.insert(key, ptBad1); EXPECT(constraint.active(config2)); @@ -55,13 +55,13 @@ TEST( testNonlinearEqualityConstraint, unary_linearization ) { double mu = 1000.0; eq2D::UnaryEqualityConstraint constraint(pt, key, mu); - simulated2D::Config config1; + simulated2D::Values config1; config1.insert(key, pt); GaussianFactor::shared_ptr actual1 = constraint.linearize(config1); GaussianFactor::shared_ptr expected1(new GaussianFactor(key, eye(2,2), zero(2), hard_model)); EXPECT(assert_equal(*expected1, *actual1, tol)); - simulated2D::Config config2; + simulated2D::Values config2; Point2 ptBad(2.0, 2.0); config2.insert(key, ptBad); GaussianFactor::shared_ptr actual2 = constraint.linearize(config2); @@ -87,11 +87,11 @@ TEST( testNonlinearEqualityConstraint, unary_simple_optimization ) { graph->push_back(constraint); graph->push_back(factor); - shared_config initConfig(new simulated2D::Config()); - initConfig->insert(key, badPt); + shared_values initValues(new simulated2D::Values()); + initValues->insert(key, badPt); - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initConfig); - simulated2D::Config expected; + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + simulated2D::Values expected; expected.insert(key, truth_pt); CHECK(assert_equal(expected, *actual, tol)); } @@ -103,7 +103,7 @@ TEST( testNonlinearEqualityConstraint, odo_basics ) { double mu = 1000.0; eq2D::OdoEqualityConstraint constraint(odom, key1, key2, mu); - simulated2D::Config config1; + simulated2D::Values config1; config1.insert(key1, x1); config1.insert(key2, x2); EXPECT(constraint.active(config1)); @@ -111,7 +111,7 @@ TEST( testNonlinearEqualityConstraint, odo_basics ) { EXPECT(assert_equal(zero(2), constraint.unwhitenedError(config1), tol)); EXPECT_DOUBLES_EQUAL(0.0, constraint.error(config1), tol); - simulated2D::Config config2; + 
simulated2D::Values config2; Point2 x1bad(2.0, 2.0); Point2 x2bad(2.0, 2.0); config2.insert(key1, x1bad); @@ -129,7 +129,7 @@ TEST( testNonlinearEqualityConstraint, odo_linearization ) { double mu = 1000.0; eq2D::OdoEqualityConstraint constraint(odom, key1, key2, mu); - simulated2D::Config config1; + simulated2D::Values config1; config1.insert(key1, x1); config1.insert(key2, x2); GaussianFactor::shared_ptr actual1 = constraint.linearize(config1); @@ -137,7 +137,7 @@ TEST( testNonlinearEqualityConstraint, odo_linearization ) { new GaussianFactor(key1, -eye(2,2), key2, eye(2,2), zero(2), hard_model)); EXPECT(assert_equal(*expected1, *actual1, tol)); - simulated2D::Config config2; + simulated2D::Values config2; Point2 x1bad(2.0, 2.0); Point2 x2bad(2.0, 2.0); config2.insert(key1, x1bad); @@ -175,12 +175,12 @@ TEST( testNonlinearEqualityConstraint, odo_simple_optimize ) { graph->push_back(constraint2); graph->push_back(factor); - shared_config initConfig(new simulated2D::Config()); - initConfig->insert(key1, Point2()); - initConfig->insert(key2, badPt); + shared_values initValues(new simulated2D::Values()); + initValues->insert(key1, Point2()); + initValues->insert(key2, badPt); - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initConfig); - simulated2D::Config expected; + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initValues); + simulated2D::Values expected; expected.insert(key1, truth_pt1); expected.insert(key2, truth_pt2); CHECK(assert_equal(expected, *actual, tol)); @@ -212,15 +212,15 @@ TEST (testNonlinearEqualityConstraint, two_pose ) { graph->add(eq2D::PointEqualityConstraint(l1, l2)); - shared_config initialEstimate(new simulated2D::Config()); + shared_values initialEstimate(new simulated2D::Values()); initialEstimate->insert(x1, pt_x1); initialEstimate->insert(x2, Point2()); initialEstimate->insert(l1, Point2(1.0, 6.0)); // ground truth initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference 
frame - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initialEstimate); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate); - simulated2D::Config expected; + simulated2D::Values expected; expected.insert(x1, pt_x1); expected.insert(l1, Point2(1.0, 6.0)); expected.insert(l2, Point2(1.0, 6.0)); @@ -255,16 +255,16 @@ TEST (testNonlinearEqualityConstraint, map_warp ) { graph->add(eq2D::PointEqualityConstraint(l1, l2)); // create an initial estimate - shared_config initialEstimate(new simulated2D::Config()); + shared_values initialEstimate(new simulated2D::Values()); initialEstimate->insert(x1, Point2( 1.0, 1.0)); initialEstimate->insert(l1, Point2( 1.0, 6.0)); initialEstimate->insert(l2, Point2(-4.0, 0.0)); // starting with a separate reference frame initialEstimate->insert(x2, Point2( 0.0, 0.0)); // other pose starts at origin // optimize - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, initialEstimate); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, initialEstimate); - simulated2D::Config expected; + simulated2D::Values expected; expected.insert(x1, Point2(1.0, 1.0)); expected.insert(l1, Point2(1.0, 6.0)); expected.insert(l2, Point2(1.0, 6.0)); @@ -279,13 +279,13 @@ Cal3_S2 K(fov,w,h); boost::shared_ptr shK(new Cal3_S2(K)); // typedefs for visual SLAM example -typedef visualSLAM::Config VConfig; -typedef boost::shared_ptr shared_vconfig; +typedef visualSLAM::Values VValues; +typedef boost::shared_ptr shared_vconfig; typedef visualSLAM::Graph VGraph; -typedef NonlinearOptimizer VOptimizer; +typedef NonlinearOptimizer VOptimizer; // factors for visual slam -typedef NonlinearEquality2 Point3Equality; +typedef NonlinearEquality2 Point3Equality; /* ********************************************************************* */ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { @@ -324,24 +324,24 @@ TEST (testNonlinearEqualityConstraint, stereo_constrained ) { Point3 landmark1(0.5, 5.0, 
0.0); Point3 landmark2(1.5, 5.0, 0.0); - shared_vconfig initConfig(new VConfig()); - initConfig->insert(x1, pose1); - initConfig->insert(x2, pose2); - initConfig->insert(l1, landmark1); - initConfig->insert(l2, landmark2); + shared_vconfig initValues(new VValues()); + initValues->insert(x1, pose1); + initValues->insert(x2, pose2); + initValues->insert(l1, landmark1); + initValues->insert(l2, landmark2); // optimize - VOptimizer::shared_config actual = VOptimizer::optimizeLM(graph, initConfig); + VOptimizer::shared_values actual = VOptimizer::optimizeLM(graph, initValues); // create config - VConfig truthConfig; - truthConfig.insert(x1, camera1.pose()); - truthConfig.insert(x2, camera2.pose()); - truthConfig.insert(l1, landmark); - truthConfig.insert(l2, landmark); + VValues truthValues; + truthValues.insert(x1, camera1.pose()); + truthValues.insert(x2, camera2.pose()); + truthValues.insert(l1, landmark); + truthValues.insert(l2, landmark); // check if correct - CHECK(assert_equal(truthConfig, *actual, 1e-5)); + CHECK(assert_equal(truthValues, *actual, 1e-5)); } diff --git a/tests/testNonlinearFactor.cpp b/tests/testNonlinearFactor.cpp index 591d1a6a3..98fd67811 100644 --- a/tests/testNonlinearFactor.cpp +++ b/tests/testNonlinearFactor.cpp @@ -1,7 +1,7 @@ /** * @file testNonlinearFactor.cpp * @brief Unit tests for Non-Linear Factor, - * create a non linear factor graph and a configuration for it and + * create a non linear factor graph and a values structure for it and * calculate the error for the factor. 
* @author Christian Potthast **/ @@ -25,7 +25,7 @@ using namespace std; using namespace gtsam; using namespace example; -typedef boost::shared_ptr > shared_nlf; +typedef boost::shared_ptr > shared_nlf; /* ************************************************************************* */ TEST( NonlinearFactor, equals ) @@ -66,8 +66,8 @@ TEST( NonlinearFactor, NonlinearFactor ) // create a non linear factor graph Graph fg = createNonlinearFactorGraph(); - // create a configuration for the non linear factor graph - Config cfg = createNoisyConfig(); + // create a values structure for the non linear factor graph + Values cfg = createNoisyValues(); // get the factor "f1" from the factor graph Graph::sharedFactor factor = fg[0]; @@ -89,7 +89,7 @@ TEST( NonlinearFactor, NonlinearFactor ) /* ************************************************************************* */ TEST( NonlinearFactor, linearize_f1 ) { - Config c = createNoisyConfig(); + Values c = createNoisyValues(); // Grab a non-linear factor Graph nfg = createNonlinearFactorGraph(); @@ -111,7 +111,7 @@ TEST( NonlinearFactor, linearize_f1 ) /* ************************************************************************* */ TEST( NonlinearFactor, linearize_f2 ) { - Config c = createNoisyConfig(); + Values c = createNoisyValues(); // Grab a non-linear factor Graph nfg = createNonlinearFactorGraph(); @@ -134,7 +134,7 @@ TEST( NonlinearFactor, linearize_f3 ) Graph::sharedFactor nlf = nfg[2]; // We linearize at noisy config from SmallExample - Config c = createNoisyConfig(); + Values c = createNoisyValues(); GaussianFactor::shared_ptr actual = nlf->linearize(c, *c.orderingArbitrary()); GaussianFactorGraph lfg = createGaussianFactorGraph(*c.orderingArbitrary()); @@ -151,7 +151,7 @@ TEST( NonlinearFactor, linearize_f4 ) Graph::sharedFactor nlf = nfg[3]; // We linearize at noisy config from SmallExample - Config c = createNoisyConfig(); + Values c = createNoisyValues(); GaussianFactor::shared_ptr actual = nlf->linearize(c, 
*c.orderingArbitrary()); GaussianFactorGraph lfg = createGaussianFactorGraph(*c.orderingArbitrary()); @@ -166,8 +166,8 @@ TEST( NonlinearFactor, size ) // create a non linear factor graph Graph fg = createNonlinearFactorGraph(); - // create a configuration for the non linear factor graph - Config cfg = createNoisyConfig(); + // create a values structure for the non linear factor graph + Values cfg = createNoisyValues(); // get some factors from the graph Graph::sharedFactor factor1 = fg[0], factor2 = fg[1], @@ -187,7 +187,7 @@ TEST( NonlinearFactor, linearize_constraint1 ) Point2 mu(1., -1.); Graph::sharedFactor f0(new simulated2D::Prior(mu, constraint, 1)); - Config config; + Values config; config.insert(simulated2D::PoseKey(1), Point2(1.0, 2.0)); GaussianFactor::shared_ptr actual = f0->linearize(config, *config.orderingArbitrary()); @@ -207,7 +207,7 @@ TEST( NonlinearFactor, linearize_constraint2 ) Point2 z3(1.,-1.); simulated2D::Measurement f0(z3, constraint, 1,1); - Config config; + Values config; config.insert(simulated2D::PoseKey(1), Point2(1.0, 2.0)); config.insert(simulated2D::PointKey(1), Point2(5.0, 4.0)); GaussianFactor::shared_ptr actual = f0.linearize(config, *config.orderingArbitrary()); diff --git a/tests/testNonlinearFactorGraph.cpp b/tests/testNonlinearFactorGraph.cpp index 36498a4ff..a3b65e4f3 100644 --- a/tests/testNonlinearFactorGraph.cpp +++ b/tests/testNonlinearFactorGraph.cpp @@ -37,11 +37,11 @@ TEST( Graph, equals ) TEST( Graph, error ) { Graph fg = createNonlinearFactorGraph(); - Config c1 = createConfig(); + Values c1 = createValues(); double actual1 = fg.error(c1); DOUBLES_EQUAL( 0.0, actual1, 1e-9 ); - Config c2 = createNoisyConfig(); + Values c2 = createNoisyValues(); double actual2 = fg.error(c2); DOUBLES_EQUAL( 5.625, actual2, 1e-9 ); } @@ -51,7 +51,7 @@ TEST( Graph, GET_ORDERING) { Ordering expected; expected += "x1","l1","x2"; Graph nlfg = createNonlinearFactorGraph(); - Ordering actual = 
*nlfg.orderingCOLAMD(createNoisyConfig()).first; + Ordering actual = *nlfg.orderingCOLAMD(createNoisyValues()).first; CHECK(assert_equal(expected,actual)); } @@ -59,7 +59,7 @@ TEST( Graph, GET_ORDERING) TEST( Graph, probPrime ) { Graph fg = createNonlinearFactorGraph(); - Config cfg = createConfig(); + Values cfg = createValues(); // evaluate the probability of the factor graph double actual = fg.probPrime(cfg); @@ -71,7 +71,7 @@ TEST( Graph, probPrime ) TEST( Graph, linearize ) { Graph fg = createNonlinearFactorGraph(); - Config initial = createNoisyConfig(); + Values initial = createNoisyValues(); boost::shared_ptr linearized = fg.linearize(initial, *initial.orderingArbitrary()); GaussianFactorGraph expected = createGaussianFactorGraph(*initial.orderingArbitrary()); CHECK(assert_equal(expected,*linearized)); // Needs correct linearizations diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 49c7c19da..f80e8335f 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -32,19 +32,19 @@ using namespace gtsam; const double tol = 1e-5; -typedef NonlinearOptimizer Optimizer; +typedef NonlinearOptimizer Optimizer; /* ************************************************************************* */ TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) { shared_ptr fg(new example::Graph( example::createNonlinearFactorGraph())); - Optimizer::shared_config initial = example::sharedNoisyConfig(); + Optimizer::shared_values initial = example::sharedNoisyValues(); - // Expected configuration is the difference between the noisy config + // Expected values structure is the difference between the noisy config // and the ground-truth config. One step only because it's linear ! 
Ordering ord1; ord1 += "x2","l1","x1"; - VectorConfig expected(initial->dims(ord1)); + VectorValues expected(initial->dims(ord1)); Vector dl1(2); dl1(0) = -0.1; dl1(1) = 0.1; @@ -64,7 +64,7 @@ TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) solver = Optimizer::shared_solver(new Optimizer::solver(Ordering::shared_ptr(new Ordering(ord1)))); Optimizer optimizer1(fg, initial, solver); - VectorConfig actual1 = optimizer1.linearizeAndOptimizeForDelta(); + VectorValues actual1 = optimizer1.linearizeAndOptimizeForDelta(); CHECK(assert_equal(actual1,expected)); // SL-FIX // Check another @@ -73,7 +73,7 @@ TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) // solver = Optimizer::shared_solver(new Optimizer::solver(ord2)); // Optimizer optimizer2(fg, initial, solver); // -// VectorConfig actual2 = optimizer2.linearizeAndOptimizeForDelta(); +// VectorValues actual2 = optimizer2.linearizeAndOptimizeForDelta(); // CHECK(assert_equal(actual2,expected)); // // // And yet another... @@ -82,7 +82,7 @@ TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) // solver = Optimizer::shared_solver(new Optimizer::solver(ord3)); // Optimizer optimizer3(fg, initial, solver); // -// VectorConfig actual3 = optimizer3.linearizeAndOptimizeForDelta(); +// VectorValues actual3 = optimizer3.linearizeAndOptimizeForDelta(); // CHECK(assert_equal(actual3,expected)); // // // More... 
@@ -91,7 +91,7 @@ TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) // solver = Optimizer::shared_solver(new Optimizer::solver(ord4)); // Optimizer optimizer4(fg, initial, solver); // -// VectorConfig actual4 = optimizer4.linearizeAndOptimizeForDelta(); +// VectorValues actual4 = optimizer4.linearizeAndOptimizeForDelta(); // CHECK(assert_equal(actual4,expected)); } @@ -104,7 +104,7 @@ TEST( NonlinearOptimizer, iterateLM ) // config far from minimum Point2 x0(3,0); - boost::shared_ptr config(new example::Config); + boost::shared_ptr config(new example::Values); config->insert(simulated2D::PoseKey(1), x0); // ordering @@ -141,13 +141,13 @@ TEST( NonlinearOptimizer, optimize ) // test error at minimum Point2 xstar(0,0); - example::Config cstar; + example::Values cstar; cstar.insert(simulated2D::PoseKey(1), xstar); DOUBLES_EQUAL(0.0,fg->error(cstar),0.0); // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = Point2 x0(3,3); - boost::shared_ptr c0(new example::Config); + boost::shared_ptr c0(new example::Values); c0->insert(simulated2D::PoseKey(1), x0); DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3); @@ -179,10 +179,10 @@ TEST( NonlinearOptimizer, SimpleLMOptimizer ) example::createReallyNonlinearFactorGraph())); Point2 x0(3,3); - boost::shared_ptr c0(new example::Config); + boost::shared_ptr c0(new example::Values); c0->insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_config actual = Optimizer::optimizeLM(fg, c0); + Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -192,10 +192,10 @@ TEST( NonlinearOptimizer, SimpleLMOptimizer_noshared ) example::Graph fg = example::createReallyNonlinearFactorGraph(); Point2 x0(3,3); - example::Config c0; + example::Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_config actual = Optimizer::optimizeLM(fg, c0); + Optimizer::shared_values actual = Optimizer::optimizeLM(fg, c0); DOUBLES_EQUAL(0,fg.error(*actual),tol); } @@ -206,10 +206,10 @@ 
TEST( NonlinearOptimizer, SimpleGNOptimizer ) example::createReallyNonlinearFactorGraph())); Point2 x0(3,3); - boost::shared_ptr c0(new example::Config); + boost::shared_ptr c0(new example::Values); c0->insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_config actual = Optimizer::optimizeGN(fg, c0); + Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0); DOUBLES_EQUAL(0,fg->error(*actual),tol); } @@ -219,19 +219,19 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared ) example::Graph fg = example::createReallyNonlinearFactorGraph(); Point2 x0(3,3); - example::Config c0; + example::Values c0; c0.insert(simulated2D::PoseKey(1), x0); - Optimizer::shared_config actual = Optimizer::optimizeGN(fg, c0); + Optimizer::shared_values actual = Optimizer::optimizeGN(fg, c0); DOUBLES_EQUAL(0,fg.error(*actual),tol); } /* ************************************************************************* */ TEST( NonlinearOptimizer, Factorization ) { - typedef NonlinearOptimizer > Optimizer; + typedef NonlinearOptimizer > Optimizer; - boost::shared_ptr config(new Pose2Config); + boost::shared_ptr config(new Pose2Values); config->insert(1, Pose2(0.,0.,0.)); config->insert(2, Pose2(1.5,0.,0.)); @@ -240,14 +240,14 @@ TEST( NonlinearOptimizer, Factorization ) graph->addConstraint(1,2, Pose2(1.,0.,0.), noiseModel::Isotropic::Sigma(3, 1)); boost::shared_ptr ordering(new Ordering); - ordering->push_back(Pose2Config::Key(1)); - ordering->push_back(Pose2Config::Key(2)); - Optimizer::shared_solver solver(new Factorization(ordering)); + ordering->push_back(Pose2Values::Key(1)); + ordering->push_back(Pose2Values::Key(2)); + Optimizer::shared_solver solver(new Factorization(ordering)); Optimizer optimizer(graph, config, solver); Optimizer optimized = optimizer.iterateLM(); - Pose2Config expected; + Pose2Values expected; expected.insert(1, Pose2(0.,0.,0.)); expected.insert(2, Pose2(1.,0.,0.)); CHECK(assert_equal(expected, *optimized.config(), 1e-5)); @@ -257,8 +257,8 @@ TEST( 
NonlinearOptimizer, Factorization ) // SL-FIX TEST( NonlinearOptimizer, SubgraphSolver ) //{ // using namespace pose2SLAM; -// typedef SubgraphSolver Solver; -// typedef NonlinearOptimizer Optimizer; +// typedef SubgraphSolver Solver; +// typedef NonlinearOptimizer Optimizer; // // // Create a graph // boost::shared_ptr graph(new Graph); @@ -266,13 +266,13 @@ TEST( NonlinearOptimizer, Factorization ) // graph->addConstraint(1, 2, Pose2(1., 0., 0.), noiseModel::Isotropic::Sigma(3, 1)); // // // Create an initial config -// boost::shared_ptr config(new Config); +// boost::shared_ptr config(new Values); // config->insert(1, Pose2(0., 0., 0.)); // config->insert(2, Pose2(1.5, 0., 0.)); // // // Create solver and optimizer // Optimizer::shared_solver solver -// (new SubgraphSolver (*graph, *config)); +// (new SubgraphSolver (*graph, *config)); // Optimizer optimizer(graph, config, solver); // // // Optimize !!!! @@ -282,7 +282,7 @@ TEST( NonlinearOptimizer, Factorization ) // absoluteThreshold, Optimizer::SILENT); // // // Check solution -// Config expected; +// Values expected; // expected.insert(1, Pose2(0., 0., 0.)); // expected.insert(2, Pose2(1., 0., 0.)); // CHECK(assert_equal(expected, *optimized.config(), 1e-5)); @@ -293,9 +293,9 @@ TEST( NonlinearOptimizer, Factorization ) //{ // shared_ptr fg(new example::Graph( // example::createNonlinearFactorGraph())); -// Optimizer::shared_config initial = example::sharedNoisyConfig(); +// Optimizer::shared_values initial = example::sharedNoisyValues(); // -// Config expected; +// Values expected; // expected.insert(simulated2D::PoseKey(1), Point2(0.0, 0.0)); // expected.insert(simulated2D::PoseKey(2), Point2(1.5, 0.0)); // expected.insert(simulated2D::PointKey(1), Point2(0.0, -1.0)); @@ -308,7 +308,7 @@ TEST( NonlinearOptimizer, Factorization ) // solver = Optimizer::shared_solver(new Optimizer::solver(ord1)); // Optimizer optimizer1(fg, initial, solver); // -// Config actual = optimizer1.levenbergMarquardt(); +// Values 
actual = optimizer1.levenbergMarquardt(); // CHECK(assert_equal(actual,expected)); //} diff --git a/tests/testSerialization.cpp b/tests/testSerialization.cpp index 493e990f3..5360699d0 100644 --- a/tests/testSerialization.cpp +++ b/tests/testSerialization.cpp @@ -121,7 +121,7 @@ bool equalsXML(const T& input = T()) { #include #include -//#include +//#include //#include //#include @@ -155,14 +155,14 @@ TEST (Serialization, xml_geometry) { /* ************************************************************************* */ TEST (Serialization, text_linear) { // NOT WORKING: -// EXPECT(equalsObj()); +// EXPECT(equalsObj()); // EXPECT(equalsObj()); } /* ************************************************************************* */ TEST (Serialization, xml_linear) { // NOT WORKING: -// EXPECT(equalsXML()); +// EXPECT(equalsXML()); // EXPECT(equalsXML()); } @@ -170,14 +170,14 @@ TEST (Serialization, xml_linear) { TEST (Serialization, text_planar) { EXPECT(equalsObj(gtsam::planarSLAM::PoseKey(2))); EXPECT(equalsObj(gtsam::planarSLAM::PointKey(2))); - EXPECT(equalsObj()); + EXPECT(equalsObj()); } /* ************************************************************************* */ TEST (Serialization, xml_planar) { EXPECT(equalsXML(gtsam::planarSLAM::PoseKey(2))); EXPECT(equalsXML(gtsam::planarSLAM::PointKey(2))); - EXPECT(equalsXML()); + EXPECT(equalsXML()); } /* ************************************************************************* */ diff --git a/tests/testSubgraphPreconditioner.cpp b/tests/testSubgraphPreconditioner.cpp index ae6acda57..d9940502a 100644 --- a/tests/testSubgraphPreconditioner.cpp +++ b/tests/testSubgraphPreconditioner.cpp @@ -28,7 +28,7 @@ TEST( SubgraphPreconditioner, planarGraph ) { // Check planar graph construction GaussianFactorGraph A; - VectorConfig xtrue; + VectorValues xtrue; boost::tie(A, xtrue) = planarGraph(3); LONGS_EQUAL(13,A.size()); LONGS_EQUAL(9,xtrue.size()); @@ -41,7 +41,7 @@ TEST( SubgraphPreconditioner, planarGraph ) // Check that xtrue is 
optimal GaussianBayesNet R1 = A.eliminate(ordering); - VectorConfig actual = optimize(R1); + VectorValues actual = optimize(R1); CHECK(assert_equal(xtrue,actual)); } @@ -50,7 +50,7 @@ TEST( SubgraphPreconditioner, splitOffPlanarTree ) { // Build a planar graph GaussianFactorGraph A; - VectorConfig xtrue; + VectorValues xtrue; boost::tie(A, xtrue) = planarGraph(3); // Get the spanning tree and constraints, and check their sizes @@ -62,7 +62,7 @@ TEST( SubgraphPreconditioner, splitOffPlanarTree ) // Check that the tree can be solved to give the ground xtrue Ordering ordering = planarOrdering(3); GaussianBayesNet R1 = T.eliminate(ordering); - VectorConfig xbar = optimize(R1); + VectorValues xbar = optimize(R1); CHECK(assert_equal(xtrue,xbar)); } @@ -71,7 +71,7 @@ TEST( SubgraphPreconditioner, system ) { // Build a planar graph GaussianFactorGraph Ab; - VectorConfig xtrue; + VectorValues xtrue; size_t N = 3; boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b @@ -84,23 +84,23 @@ TEST( SubgraphPreconditioner, system ) // Eliminate the spanning tree to build a prior Ordering ordering = planarOrdering(N); SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1_.eliminate_(ordering); // R1*x-c1 - SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1); // xbar = inv(R1)*c1 + SubgraphPreconditioner::sharedValues xbar = optimize_(*Rc1); // xbar = inv(R1)*c1 // Create Subgraph-preconditioned system SubgraphPreconditioner system(Ab1, Ab2, Rc1, xbar); // Create zero config - VectorConfig zeros = VectorConfig::zero(*xbar); + VectorValues zeros = VectorValues::zero(*xbar); // Set up y0 as all zeros - VectorConfig y0 = zeros; + VectorValues y0 = zeros; // y1 = perturbed y0 - VectorConfig y1 = zeros; + VectorValues y1 = zeros; y1["x2003"] = Vector_(2, 1.0, -1.0); // Check corresponding x values - VectorConfig expected_x1 = xtrue, x1 = system.x(y1); + VectorValues expected_x1 = xtrue, x1 = system.x(y1); expected_x1["x2003"] = Vector_(2, 2.01, 2.99); expected_x1["x3003"] = Vector_(2, 3.01, 
2.99); CHECK(assert_equal(xtrue, system.x(y0))); @@ -113,8 +113,8 @@ TEST( SubgraphPreconditioner, system ) DOUBLES_EQUAL(3,system.error(y1),1e-9); // Test gradient in x - VectorConfig expected_gx0 = zeros; - VectorConfig expected_gx1 = zeros; + VectorValues expected_gx0 = zeros; + VectorValues expected_gx1 = zeros; CHECK(assert_equal(expected_gx0,Ab.gradient(xtrue))); expected_gx1["x1003"] = Vector_(2, -100., 100.); expected_gx1["x2002"] = Vector_(2, -100., 100.); @@ -124,8 +124,8 @@ TEST( SubgraphPreconditioner, system ) CHECK(assert_equal(expected_gx1,Ab.gradient(x1))); // Test gradient in y - VectorConfig expected_gy0 = zeros; - VectorConfig expected_gy1 = zeros; + VectorValues expected_gy0 = zeros; + VectorValues expected_gy1 = zeros; expected_gy1["x1003"] = Vector_(2, 2., -2.); expected_gy1["x2002"] = Vector_(2, -2., 2.); expected_gy1["x2003"] = Vector_(2, 3., -3.); @@ -136,7 +136,7 @@ TEST( SubgraphPreconditioner, system ) // Check it numerically for good measure // TODO use boost::bind(&SubgraphPreconditioner::error,&system,_1) -// Vector numerical_g1 = numericalGradient (error, y1, 0.001); +// Vector numerical_g1 = numericalGradient (error, y1, 0.001); // Vector expected_g1 = Vector_(18, 0., 0., 0., 0., 2., -2., 0., 0., -2., 2., // 3., -3., 0., 0., -1., 1., 1., -1.); // CHECK(assert_equal(expected_g1,numerical_g1)); @@ -147,7 +147,7 @@ TEST( SubgraphPreconditioner, conjugateGradients ) { // Build a planar graph GaussianFactorGraph Ab; - VectorConfig xtrue; + VectorValues xtrue; size_t N = 3; boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b @@ -160,28 +160,28 @@ TEST( SubgraphPreconditioner, conjugateGradients ) // Eliminate the spanning tree to build a prior Ordering ordering = planarOrdering(N); SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1_.eliminate_(ordering); // R1*x-c1 - SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1); // xbar = inv(R1)*c1 + SubgraphPreconditioner::sharedValues xbar = optimize_(*Rc1); // xbar = inv(R1)*c1 // Create 
Subgraph-preconditioned system SubgraphPreconditioner system(Ab1, Ab2, Rc1, xbar); // Create zero config y0 and perturbed config y1 - VectorConfig y0 = VectorConfig::zero(*xbar); + VectorValues y0 = VectorValues::zero(*xbar); - VectorConfig y1 = y0; + VectorValues y1 = y0; y1["x2003"] = Vector_(2, 1.0, -1.0); - VectorConfig x1 = system.x(y1); + VectorValues x1 = system.x(y1); // Solve for the remaining constraints using PCG bool verbose = false; double epsilon = 1e-3; size_t maxIterations = 100; - VectorConfig actual = gtsam::conjugateGradients(system, y1, verbose, epsilon, epsilon, maxIterations); + VectorValues actual = gtsam::conjugateGradients(system, y1, verbose, epsilon, epsilon, maxIterations); CHECK(assert_equal(y0,actual)); // Compare with non preconditioned version: - VectorConfig actual2 = conjugateGradientDescent(Ab, x1, verbose, epsilon, + VectorValues actual2 = conjugateGradientDescent(Ab, x1, verbose, epsilon, epsilon, maxIterations); CHECK(assert_equal(xtrue,actual2,1e-4)); } diff --git a/tests/testTransformConstraint.cpp b/tests/testTransformConstraint.cpp index 180d42582..ddfac5760 100644 --- a/tests/testTransformConstraint.cpp +++ b/tests/testTransformConstraint.cpp @@ -31,19 +31,19 @@ typedef TypedSymbol PoseKey; typedef TypedSymbol PointKey; typedef TypedSymbol TransformKey; -typedef LieValues PoseConfig; -typedef LieValues PointConfig; -typedef LieValues TransformConfig; +typedef LieValues PoseValues; +typedef LieValues PointValues; +typedef LieValues TransformValues; -typedef TupleValues3< PoseConfig, PointConfig, TransformConfig > DDFConfig; -typedef NonlinearFactorGraph DDFGraph; -typedef NonlinearOptimizer Optimizer; +typedef TupleValues3< PoseValues, PointValues, TransformValues > DDFValues; +typedef NonlinearFactorGraph DDFGraph; +typedef NonlinearOptimizer Optimizer; -typedef NonlinearEquality PoseConstraint; -typedef NonlinearEquality PointConstraint; -typedef NonlinearEquality TransformPriorConstraint; +typedef NonlinearEquality 
PoseConstraint; +typedef NonlinearEquality PointConstraint; +typedef NonlinearEquality TransformPriorConstraint; -typedef TransformConstraint PointTransformConstraint; +typedef TransformConstraint PointTransformConstraint; PointKey lA1(1), lA2(2), lB1(3); TransformKey t1(1); @@ -160,7 +160,7 @@ TEST( TransformConstraint, converge_trans ) { graph.add(PointConstraint(globalK2, global2, error_gain)); // create initial estimate - DDFConfig init; + DDFValues init; init.insert(localK1, local1); init.insert(localK2, local2); init.insert(globalK1, global1); @@ -168,9 +168,9 @@ TEST( TransformConstraint, converge_trans ) { init.insert(transK, trans); // optimize - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, init); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, init); - DDFConfig expected; + DDFValues expected; expected.insert(localK1, local1); expected.insert(localK2, local2); expected.insert(globalK1, global1); @@ -208,13 +208,13 @@ TEST( TransformConstraint, converge_local ) { graph.add(TransformPriorConstraint(transK, trans, error_gain)); // create initial estimate - DDFConfig init; + DDFValues init; init.insert(localK, local); init.insert(globalK, global); init.insert(transK, trans); // optimize - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, init); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, init); CHECK(assert_equal(idealLocal, actual->at(localK), 1e-5)); } @@ -246,13 +246,13 @@ TEST( TransformConstraint, converge_global ) { graph.add(TransformPriorConstraint(transK, trans, error_gain)); // create initial estimate - DDFConfig init; + DDFValues init; init.insert(localK, local); init.insert(globalK, global); init.insert(transK, trans); // optimize - Optimizer::shared_config actual = Optimizer::optimizeLM(graph, init); + Optimizer::shared_values actual = Optimizer::optimizeLM(graph, init); // verify CHECK(assert_equal(idealForeign, actual->at(globalK), 1e-5)); diff --git 
a/tests/testTupleValues.cpp b/tests/testTupleValues.cpp index f19395f36..d200a801c 100644 --- a/tests/testTupleValues.cpp +++ b/tests/testTupleValues.cpp @@ -27,9 +27,9 @@ static const double tol = 1e-5; typedef TypedSymbol PoseKey; typedef TypedSymbol PointKey; -typedef LieValues PoseConfig; -typedef LieValues PointConfig; -typedef TupleValues2 Config; +typedef LieValues PoseValues; +typedef LieValues PointValues; +typedef TupleValues2 Values; /* ************************************************************************* */ TEST( TupleValues, constructors ) @@ -37,14 +37,14 @@ TEST( TupleValues, constructors ) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - Config::Config1 cfg1; + Values::Values1 cfg1; cfg1.insert(PoseKey(1), x1); cfg1.insert(PoseKey(2), x2); - Config::Config2 cfg2; + Values::Values2 cfg2; cfg2.insert(PointKey(1), l1); cfg2.insert(PointKey(2), l2); - Config actual(cfg1, cfg2), expected; + Values actual(cfg1, cfg2), expected; expected.insert(PoseKey(1), x1); expected.insert(PoseKey(2), x2); expected.insert(PointKey(1), l1); @@ -59,13 +59,13 @@ TEST( TupleValues, insert_equals1 ) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - Config expected; + Values expected; expected.insert(PoseKey(1), x1); expected.insert(PoseKey(2), x2); expected.insert(PointKey(1), l1); expected.insert(PointKey(2), l2); - Config actual; + Values actual; actual.insert(PoseKey(1), x1); actual.insert(PoseKey(2), x2); actual.insert(PointKey(1), l1); @@ -80,13 +80,13 @@ TEST( TupleValues, insert_equals2 ) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - Config config1; + Values config1; config1.insert(PoseKey(1), x1); config1.insert(PoseKey(2), x2); config1.insert(PointKey(1), l1); config1.insert(PointKey(2), l2); - Config config2; + Values config2; config2.insert(PoseKey(1), x1); config2.insert(PoseKey(2), x2); config2.insert(PointKey(1), l1); @@ -104,7 +104,7 @@ TEST( TupleValues, insert_duplicate ) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - 
Config config1; + Values config1; config1.insert(1, x1); // 3 config1.insert(2, x2); // 6 config1.insert(1, l1); // 8 @@ -122,7 +122,7 @@ TEST( TupleValues, size_dim ) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - Config config1; + Values config1; config1.insert(PoseKey(1), x1); config1.insert(PoseKey(2), x2); config1.insert(PointKey(1), l1); @@ -138,7 +138,7 @@ TEST(TupleValues, at) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - Config config1; + Values config1; config1.insert(PoseKey(1), x1); config1.insert(PoseKey(2), x2); config1.insert(PointKey(1), l1); @@ -159,14 +159,14 @@ TEST(TupleValues, zero_expmap_logmap) Pose2 x1(1,2,3), x2(6,7,8); Point2 l1(4,5), l2(9,10); - Config config1; + Values config1; config1.insert(PoseKey(1), x1); config1.insert(PoseKey(2), x2); config1.insert(PointKey(1), l1); config1.insert(PointKey(2), l2); Ordering o; o += "x1", "x2", "l1", "l2"; - VectorConfig expected_zero(config1.dims(o)); + VectorValues expected_zero(config1.dims(o)); expected_zero[o["x1"]] = zero(3); expected_zero[o["x2"]] = zero(3); expected_zero[o["l1"]] = zero(2); @@ -174,24 +174,24 @@ TEST(TupleValues, zero_expmap_logmap) CHECK(assert_equal(expected_zero, config1.zero(o))); - VectorConfig delta(config1.dims(o)); + VectorValues delta(config1.dims(o)); delta[o["x1"]] = Vector_(3, 1.0, 1.1, 1.2); delta[o["x2"]] = Vector_(3, 1.3, 1.4, 1.5); delta[o["l1"]] = Vector_(2, 1.0, 1.1); delta[o["l2"]] = Vector_(2, 1.3, 1.4); - Config expected; + Values expected; expected.insert(PoseKey(1), x1.expmap(Vector_(3, 1.0, 1.1, 1.2))); expected.insert(PoseKey(2), x2.expmap(Vector_(3, 1.3, 1.4, 1.5))); expected.insert(PointKey(1), Point2(5.0, 6.1)); expected.insert(PointKey(2), Point2(10.3, 11.4)); - Config actual = config1.expmap(delta, o); + Values actual = config1.expmap(delta, o); CHECK(assert_equal(expected, actual)); // Check log - VectorConfig expected_log = delta; - VectorConfig actual_log = config1.logmap(actual, o); + VectorValues expected_log = delta; + 
VectorValues actual_log = config1.logmap(actual, o); CHECK(assert_equal(expected_log, actual_log)); } @@ -204,20 +204,20 @@ typedef TypedSymbol Point3Key; typedef TypedSymbol Point3Key2; // some config types -typedef LieValues PoseConfig; -typedef LieValues PointConfig; -typedef LieValues LamConfig; -typedef LieValues Pose3Config; -typedef LieValues Point3Config; -typedef LieValues Point3Config2; +typedef LieValues PoseValues; +typedef LieValues PointValues; +typedef LieValues LamValues; +typedef LieValues Pose3Values; +typedef LieValues Point3Values; +typedef LieValues Point3Values2; // some TupleValues types -typedef TupleValues > ConfigA; -typedef TupleValues > > ConfigB; +typedef TupleValues > ValuesA; +typedef TupleValues > > ValuesB; -typedef TupleValues1 TuplePoseConfig; -typedef TupleValues1 TuplePointConfig; -typedef TupleValues2 SimpleConfig; +typedef TupleValues1 TuplePoseValues; +typedef TupleValues1 TuplePointValues; +typedef TupleValues2 SimpleValues; /* ************************************************************************* */ TEST(TupleValues, slicing) { @@ -226,37 +226,37 @@ TEST(TupleValues, slicing) { PoseKey x1(1), x2(2); Pose2 x1_val(1.0, 2.0, 0.3), x2_val(3.0, 4.0, 0.4); - PoseConfig liePoseConfig; - liePoseConfig.insert(x1, x1_val); - liePoseConfig.insert(x2, x2_val); + PoseValues liePoseValues; + liePoseValues.insert(x1, x1_val); + liePoseValues.insert(x2, x2_val); - PointConfig liePointConfig; - liePointConfig.insert(l1, l1_val); - liePointConfig.insert(l2, l2_val); + PointValues liePointValues; + liePointValues.insert(l1, l1_val); + liePointValues.insert(l2, l2_val); // construct TupleValues1 from the base config - TuplePoseConfig tupPoseConfig1(liePoseConfig); - EXPECT(assert_equal(liePoseConfig, tupPoseConfig1.first(), tol)); + TuplePoseValues tupPoseValues1(liePoseValues); + EXPECT(assert_equal(liePoseValues, tupPoseValues1.first(), tol)); - TuplePointConfig tupPointConfig1(liePointConfig); - EXPECT(assert_equal(liePointConfig, 
tupPointConfig1.first(), tol)); + TuplePointValues tupPointValues1(liePointValues); + EXPECT(assert_equal(liePointValues, tupPointValues1.first(), tol)); // // construct a TupleValues2 from a TupleValues1 -// SimpleConfig pairConfig1(tupPoseConfig1); -// EXPECT(assert_equal(liePoseConfig, pairConfig1.first(), tol)); -// EXPECT(pairConfig1.second().empty()); +// SimpleValues pairValues1(tupPoseValues1); +// EXPECT(assert_equal(liePoseValues, pairValues1.first(), tol)); +// EXPECT(pairValues1.second().empty()); // -// SimpleConfig pairConfig2(tupPointConfig1); -// EXPECT(assert_equal(liePointConfig, pairConfig2.second(), tol)); -// EXPECT(pairConfig1.first().empty()); +// SimpleValues pairValues2(tupPointValues1); +// EXPECT(assert_equal(liePointValues, pairValues2.second(), tol)); +// EXPECT(pairValues1.first().empty()); } /* ************************************************************************* */ TEST(TupleValues, basic_functions) { // create some tuple configs - ConfigA configA; - ConfigB configB; + ValuesA configA; + ValuesB configB; PoseKey x1(1); PointKey l1(1); @@ -326,7 +326,7 @@ TEST(TupleValues, basic_functions) { /* ************************************************************************* */ TEST(TupleValues, insert_config) { - ConfigB config1, config2, expected; + ValuesB config1, config2, expected; PoseKey x1(1), x2(2); PointKey l1(1), l2(2); @@ -358,7 +358,7 @@ TEST(TupleValues, insert_config) { /* ************************************************************************* */ TEST( TupleValues, update_element ) { - TupleValues2 cfg; + TupleValues2 cfg; Pose2 x1(2.0, 1.0, 2.0), x2(3.0, 4.0, 5.0); Point2 l1(1.0, 2.0), l2(3.0, 4.0); PoseKey xk(1); @@ -389,7 +389,7 @@ TEST( TupleValues, equals ) Point2 l1(4,5), l2(9,10); PointKey l1k(1), l2k(2); - ConfigA config1, config2, config3, config4, config5; + ValuesA config1, config2, config3, config4, config5; config1.insert(x1k, x1); config1.insert(x2k, x2); @@ -409,7 +409,7 @@ TEST( TupleValues, equals ) 
config4.insert(l1k, l1); config4.insert(l2k, l2); - ConfigA config6(config1); + ValuesA config6(config1); EXPECT(assert_equal(config1,config2)); EXPECT(assert_equal(config1,config1)); @@ -429,19 +429,19 @@ TEST(TupleValues, expmap) Ordering o; o += "x1", "x2", "l1", "l2"; - ConfigA config1; + ValuesA config1; config1.insert(x1k, x1); config1.insert(x2k, x2); config1.insert(l1k, l1); config1.insert(l2k, l2); - VectorConfig delta(config1.dims(o)); + VectorValues delta(config1.dims(o)); delta[o["x1"]] = Vector_(3, 1.0, 1.1, 1.2); delta[o["x2"]] = Vector_(3, 1.3, 1.4, 1.5); delta[o["l1"]] = Vector_(2, 1.0, 1.1); delta[o["l2"]] = Vector_(2, 1.3, 1.4); - ConfigA expected; + ValuesA expected; expected.insert(x1k, x1.expmap(Vector_(3, 1.0, 1.1, 1.2))); expected.insert(x2k, x2.expmap(Vector_(3, 1.3, 1.4, 1.5))); expected.insert(l1k, Point2(5.0, 6.1)); @@ -461,13 +461,13 @@ TEST(TupleValues, expmap_typedefs) Ordering o; o += "x1", "x2", "l1", "l2"; - TupleValues2 config1, expected, actual; + TupleValues2 config1, expected, actual; config1.insert(x1k, x1); config1.insert(x2k, x2); config1.insert(l1k, l1); config1.insert(l2k, l2); - VectorConfig delta(config1.dims(o)); + VectorValues delta(config1.dims(o)); delta[o["x1"]] = Vector_(3, 1.0, 1.1, 1.2); delta[o["x2"]] = Vector_(3, 1.3, 1.4, 1.5); delta[o["l1"]] = Vector_(2, 1.0, 1.1); @@ -478,18 +478,18 @@ TEST(TupleValues, expmap_typedefs) expected.insert(l1k, Point2(5.0, 6.1)); expected.insert(l2k, Point2(10.3, 11.4)); - CHECK(assert_equal(expected, TupleValues2(config1.expmap(delta, o)))); + CHECK(assert_equal(expected, TupleValues2(config1.expmap(delta, o)))); //CHECK(assert_equal(delta, config1.logmap(expected))); } /* ************************************************************************* */ TEST(TupleValues, typedefs) { - TupleValues2 config1; - TupleValues3 config2; - TupleValues4 config3; - TupleValues5 config4; - TupleValues6 config5; + TupleValues2 config1; + TupleValues3 config2; + TupleValues4 config3; + 
TupleValues5 config4; + TupleValues6 config5; } /* ************************************************************************* */ @@ -502,12 +502,12 @@ TEST( TupleValues, pairconfig_style ) Point2 point1(2.0, 3.0); LieVector lam1 = LieVector(2.3); - PoseConfig config1; config1.insert(x1, pose1); - PointConfig config2; config2.insert(l1, point1); - LamConfig config3; config3.insert(L1, lam1); + PoseValues config1; config1.insert(x1, pose1); + PointValues config2; config2.insert(l1, point1); + LamValues config3; config3.insert(L1, lam1); // Constructor - TupleValues3 config(config1, config2, config3); + TupleValues3 config(config1, config2, config3); // access CHECK(assert_equal(config1, config.first())); @@ -518,7 +518,7 @@ TEST( TupleValues, pairconfig_style ) /* ************************************************************************* */ TEST(TupleValues, insert_config_typedef) { - TupleValues4 config1, config2, expected; + TupleValues4 config1, config2, expected; PoseKey x1(1), x2(2); PointKey l1(1), l2(2); @@ -549,7 +549,7 @@ TEST(TupleValues, insert_config_typedef) { /* ************************************************************************* */ TEST(TupleValues, partial_insert) { - TupleValues3 init, expected; + TupleValues3 init, expected; PoseKey x1(1), x2(2); PointKey l1(1), l2(2); @@ -562,7 +562,7 @@ TEST(TupleValues, partial_insert) { init.insert(l1, point1); init.insert(L1, lam1); - PoseConfig cfg1; + PoseValues cfg1; cfg1.insert(x2, pose2); init.insertSub(cfg1); @@ -577,7 +577,7 @@ TEST(TupleValues, partial_insert) { /* ************************************************************************* */ TEST(TupleValues, update) { - TupleValues3 init, superset, expected; + TupleValues3 init, superset, expected; PoseKey x1(1), x2(2); PointKey l1(1), l2(2); diff --git a/tests/timeGaussianFactorGraph.cpp b/tests/timeGaussianFactorGraph.cpp index e97258bc2..759fd785f 100644 --- a/tests/timeGaussianFactorGraph.cpp +++ b/tests/timeGaussianFactorGraph.cpp @@ -34,10 
+34,10 @@ double timeKalmanSmoother(int T) { // Create a planar factor graph and optimize // todo: use COLAMD ordering again (removed when linear baked-in ordering added) double timePlanarSmoother(int N, bool old = true) { - boost::tuple pg = planarGraph(N); + boost::tuple pg = planarGraph(N); GaussianFactorGraph& fg(pg.get<0>()); // Ordering& ordering(pg.get<1>()); -// VectorConfig& config(pg.get<2>()); +// VectorValues& config(pg.get<2>()); clock_t start = clock(); optimize(*Inference::Eliminate(fg)); clock_t end = clock (); @@ -49,10 +49,10 @@ double timePlanarSmoother(int N, bool old = true) { // Create a planar factor graph and eliminate // todo: use COLAMD ordering again (removed when linear baked-in ordering added) double timePlanarSmootherEliminate(int N, bool old = true) { - boost::tuple pg = planarGraph(N); + boost::tuple pg = planarGraph(N); GaussianFactorGraph& fg(pg.get<0>()); // Ordering& ordering(pg.get<1>()); -// VectorConfig& config(pg.get<2>()); +// VectorValues& config(pg.get<2>()); clock_t start = clock(); Inference::Eliminate(fg); clock_t end = clock (); @@ -65,7 +65,7 @@ double timePlanarSmootherEliminate(int N, bool old = true) { //// This variation uses the original join factors approach //double timePlanarSmootherJoinAug(int N, size_t reps) { // GaussianFactorGraph fgBase; -// VectorConfig config; +// VectorValues config; // boost::tie(fgBase,config) = planarGraph(N); // Ordering ordering = fgBase.getOrdering(); // Symbol key = ordering.front(); @@ -98,7 +98,7 @@ double timePlanarSmootherEliminate(int N, bool old = true) { //// This variation uses the single-allocate version to create the matrix //double timePlanarSmootherCombined(int N, size_t reps) { // GaussianFactorGraph fgBase; -// VectorConfig config; +// VectorValues config; // boost::tie(fgBase,config) = planarGraph(N); // Ordering ordering = fgBase.getOrdering(); // Symbol key = ordering.front(); diff --git a/tests/timeLinearOnDataset.cpp b/tests/timeLinearOnDataset.cpp index 
4bbf4cb74..1d7906e6f 100644 --- a/tests/timeLinearOnDataset.cpp +++ b/tests/timeLinearOnDataset.cpp @@ -22,7 +22,7 @@ int main(int argc, char *argv[]) { else datasetname = "intel"; - pair, shared_ptr > data = load2D(dataset(datasetname)); + pair, shared_ptr > data = load2D(dataset(datasetname)); tic("Z 1 order"); Ordering::shared_ptr ordering(data.first->orderingCOLAMD(*data.second).first); @@ -36,7 +36,7 @@ int main(int argc, char *argv[]) { tic("Z 3 solve"); GaussianJunctionTree gjt(*gfg); - VectorConfig soln(gjt.optimize()); + VectorValues soln(gjt.optimize()); toc("Z 3 solve"); tictoc_print();