diff --git a/.cproject b/.cproject
index 95c0f2a96..df6fdc5f5 100644
--- a/.cproject
+++ b/.cproject
@@ -309,14 +309,6 @@
true
true
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
make
-j2
@@ -343,6 +335,7 @@
make
+
tests/testBayesTree.run
true
false
@@ -350,6 +343,7 @@
make
+
testBinaryBayesNet.run
true
false
@@ -397,6 +391,7 @@
make
+
testSymbolicBayesNet.run
true
false
@@ -404,6 +399,7 @@
make
+
tests/testSymbolicFactor.run
true
false
@@ -411,6 +407,7 @@
make
+
testSymbolicFactorGraph.run
true
false
@@ -426,11 +423,20 @@
make
+
tests/testBayesTree
true
false
true
+
+ make
+ -j2
+ testGaussianFactor.run
+ true
+ true
+ true
+
make
-j5
@@ -519,22 +525,6 @@
false
true
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
make
-j2
@@ -551,6 +541,22 @@
true
true
+
+ make
+ -j2
+ all
+ true
+ true
+ true
+
+
+ make
+ -j2
+ clean
+ true
+ true
+ true
+
make
-j2
@@ -575,26 +581,26 @@
true
true
-
+
make
- -j5
- testValues.run
+ -j2
+ all
true
true
true
-
+
make
- -j5
- testOrdering.run
+ -j2
+ check
true
true
true
-
+
make
- -j5
- testKey.run
+ -j2
+ clean
true
true
true
@@ -679,26 +685,26 @@
true
true
-
+
make
- -j2
- all
+ -j5
+ testValues.run
true
true
true
-
+
make
- -j2
- check
+ -j5
+ testOrdering.run
true
true
true
-
+
make
- -j2
- clean
+ -j5
+ testKey.run
true
true
true
@@ -985,6 +991,7 @@
make
+
testGraph.run
true
false
@@ -992,6 +999,7 @@
make
+
testJunctionTree.run
true
false
@@ -999,6 +1007,7 @@
make
+
testSymbolicBayesNetB.run
true
false
@@ -1028,6 +1037,30 @@
true
true
+
+ make
+ -j5
+ testIterative.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testSubgraphSolver.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testGaussianFactorGraphB.run
+ true
+ true
+ true
+
make
-j5
@@ -1134,6 +1167,7 @@
make
+
testErrors.run
true
false
@@ -1179,10 +1213,10 @@
true
true
-
+
make
- -j2
- testGaussianFactor.run
+ -j5
+ testLinearContainerFactor.run
true
true
true
@@ -1267,10 +1301,10 @@
true
true
-
+
make
- -j5
- testLinearContainerFactor.run
+ -j2
+ testGaussianFactor.run
true
true
true
@@ -1605,7 +1639,6 @@
make
-
testSimulated2DOriented.run
true
false
@@ -1645,7 +1678,6 @@
make
-
testSimulated2D.run
true
false
@@ -1653,7 +1685,6 @@
make
-
testSimulated3D.run
true
false
@@ -1845,7 +1876,6 @@
make
-
tests/testGaussianISAM2
true
false
@@ -1867,102 +1897,6 @@
true
true
-
- make
- -j2
- testRot3.run
- true
- true
- true
-
-
- make
- -j2
- testRot2.run
- true
- true
- true
-
-
- make
- -j2
- testPose3.run
- true
- true
- true
-
-
- make
- -j2
- timeRot3.run
- true
- true
- true
-
-
- make
- -j2
- testPose2.run
- true
- true
- true
-
-
- make
- -j2
- testCal3_S2.run
- true
- true
- true
-
-
- make
- -j2
- testSimpleCamera.run
- true
- true
- true
-
-
- make
- -j2
- testHomography2.run
- true
- true
- true
-
-
- make
- -j2
- testCalibratedCamera.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- testPoint2.run
- true
- true
- true
-
make
-j1
@@ -2164,6 +2098,7 @@
cpack
+
-G DEB
true
false
@@ -2171,6 +2106,7 @@
cpack
+
-G RPM
true
false
@@ -2178,6 +2114,7 @@
cpack
+
-G TGZ
true
false
@@ -2185,6 +2122,7 @@
cpack
+
--config CPackSourceConfig.cmake
true
false
@@ -2318,34 +2256,98 @@
true
true
-
+
make
- -j5
- testSpirit.run
+ -j2
+ testRot3.run
true
true
true
-
+
make
- -j5
- testWrap.run
+ -j2
+ testRot2.run
true
true
true
-
+
make
- -j5
- check.wrap
+ -j2
+ testPose3.run
true
true
true
-
+
make
- -j5
- wrap
+ -j2
+ timeRot3.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testPose2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testCal3_S2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testSimpleCamera.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testHomography2.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testCalibratedCamera.run
+ true
+ true
+ true
+
+
+ make
+ -j2
+ check
+ true
+ true
+ true
+
+
+ make
+ -j2
+ clean
+ true
+ true
+ true
+
+
+ make
+ -j2
+ testPoint2.run
true
true
true
@@ -2389,6 +2391,38 @@
false
true
+
+ make
+ -j5
+ testSpirit.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ testWrap.run
+ true
+ true
+ true
+
+
+ make
+ -j5
+ check.wrap
+ true
+ true
+ true
+
+
+ make
+ -j5
+ wrap
+ true
+ true
+ true
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 85984d7e0..5c1c5cdab 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -161,7 +161,7 @@ if (DOXYGEN_FOUND)
add_subdirectory(doc)
endif()
-# Set up CPack
+## Set up CPack
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu")
@@ -171,12 +171,18 @@ set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
-set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory
-set(CPACK_SOURCE_IGNORE_FILES "/build;/\\\\.;/makestats.sh$")
+#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory
+#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error
+set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$")
set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/")
+set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
+# Deb-package specific cpack
+set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
+set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
+
# print configuration variables
message(STATUS "===============================================================")
message(STATUS "================ Configuration Options ======================")
diff --git a/README b/README
index 1a992292a..d7486c8b8 100644
--- a/README
+++ b/README
@@ -59,10 +59,12 @@ Tested compilers
- GCC 4.2-4.7
- Clang 2.9-3.2
- OSX GCC 4.2
+ - MSVC 2010, 2012
Tested systems:
- Ubuntu 11.04, 11.10, 12.04
- MacOS 10.6, 10.7
+ - Windows 7
2)
GTSAM makes extensive use of debug assertions, and we highly recommend you work
@@ -72,8 +74,8 @@ will run up to 10x faster in Release mode! See the end of this document for
additional debugging tips.
3)
-GTSAM has Doxygen documentation. To generate, run the the script
-makedoc.sh.
+GTSAM has Doxygen documentation. To generate, run 'make doc' from your
+build directory.
4)
The instructions below install the library to the default system install path and
diff --git a/examples/Pose2SLAMwSPCG.cpp b/examples/Pose2SLAMwSPCG.cpp
index 240361db7..36f2552a1 100644
--- a/examples/Pose2SLAMwSPCG.cpp
+++ b/examples/Pose2SLAMwSPCG.cpp
@@ -54,13 +54,9 @@
// for each variable, held in a Values container.
#include <gtsam/nonlinear/Values.h>
-// ???
-#include
#include
#include
-
-
using namespace std;
using namespace gtsam;
@@ -110,14 +106,6 @@ int main(int argc, char** argv) {
parameters.verbosityLM = LevenbergMarquardtParams::LAMBDA;
parameters.linearSolverType = SuccessiveLinearizationParams::CONJUGATE_GRADIENT;
- {
- parameters.iterativeParams = boost::make_shared<SimpleSPCGSolverParameters>();
- LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
- Values result = optimizer.optimize();
- result.print("Final Result:\n");
- cout << "simple spcg solver final error = " << graph.error(result) << endl;
- }
-
{
parameters.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
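A minimal sketch of the configuration this example retains: Levenberg-Marquardt with the subgraph-preconditioned conjugate gradient (SPCG) back-end. `graph` and `initialEstimate` are assumed to be built as in the example above; the include paths are assumptions.

    #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h> // assumed path
    #include <boost/make_shared.hpp>
    using namespace gtsam;

    // Delegate LM's linear solves to SPCG via iterativeParams.
    Values solveWithSPCG(const NonlinearFactorGraph& graph, const Values& initialEstimate) {
      LevenbergMarquardtParams parameters;
      parameters.linearSolverType = SuccessiveLinearizationParams::CONJUGATE_GRADIENT;
      parameters.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
      LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
      return optimizer.optimize();
    }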
diff --git a/gtsam.h b/gtsam.h
index 4aa1480df..9c6f76527 100644
--- a/gtsam.h
+++ b/gtsam.h
@@ -1022,8 +1022,10 @@ class GaussianFactorGraph {
// Conversion to matrices
Matrix sparseJacobian_() const;
- Matrix denseJacobian() const;
- Matrix denseHessian() const;
+ Matrix augmentedJacobian() const;
+ pair<Matrix, Vector> jacobian() const;
+ Matrix augmentedHessian() const;
+ pair<Matrix, Vector> hessian() const;
};
class GaussianISAM {
@@ -1045,6 +1047,46 @@ class GaussianSequentialSolver {
Matrix marginalCovariance(size_t j) const;
};
+#include <gtsam/linear/IterativeSolver.h>
+virtual class IterativeOptimizationParameters {
+ string getKernel() const ;
+ string getVerbosity() const;
+ void setKernel(string s) ;
+ void setVerbosity(string s) ;
+};
+
+//virtual class IterativeSolver {
+// IterativeSolver();
+// gtsam::VectorValues optimize ();
+//};
+
+#include <gtsam/linear/ConjugateGradientSolver.h>
+virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
+ ConjugateGradientParameters();
+ size_t getMinIterations() const ;
+ size_t getMaxIterations() const ;
+ size_t getReset() const;
+ double getEpsilon_rel() const;
+ double getEpsilon_abs() const;
+
+ void setMinIterations(size_t value);
+ void setMaxIterations(size_t value);
+ void setReset(size_t value);
+ void setEpsilon_rel(double value);
+ void setEpsilon_abs(double value);
+};
+
+#include <gtsam/linear/SubgraphSolver.h>
+virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
+ SubgraphSolverParameters();
+ void print(string s) const;
+};
+
+class SubgraphSolver {
+ SubgraphSolver(const gtsam::GaussianFactorGraph &A, const gtsam::SubgraphSolverParameters ¶meters);
+ gtsam::VectorValues optimize() const;
+};
+
#include <gtsam/linear/KalmanFilter.h>
class KalmanFilter {
KalmanFilter(size_t n);
@@ -1288,6 +1330,7 @@ virtual class SuccessiveLinearizationParams : gtsam::NonlinearOptimizerParams {
void setLinearSolverType(string solver);
void setOrdering(const gtsam::Ordering& ordering);
+ void setIterativeParams(const gtsam::SubgraphSolverParameters ¶ms);
bool isMultifrontal() const;
bool isSequential() const;
@@ -1443,6 +1486,8 @@ class ISAM2 {
bool equals(const gtsam::ISAM2& other, double tol) const;
void print(string s) const;
+ void printStats() const;
+ void saveGraph(string s) const;
gtsam::ISAM2Result update();
gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta);
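The wrapper additions above mirror the C++ API; a small usage sketch, assuming SubgraphSolver.h lives under gtsam/linear:

    #include <gtsam/linear/SubgraphSolver.h> // assumed path
    using namespace gtsam;

    // Solve the linear system held in a GaussianFactorGraph with the newly
    // exposed SubgraphSolver, tightening the CG stopping criteria first.
    VectorValues solveIteratively(const GaussianFactorGraph& Ab) {
      SubgraphSolverParameters parameters;
      parameters.setMaxIterations(100); // setters inherited from ConjugateGradientParameters
      parameters.setEpsilon_rel(1e-5);
      SubgraphSolver solver(Ab, parameters);
      return solver.optimize();
    }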
diff --git a/gtsam_unstable/base/DSFVector.cpp b/gtsam/base/DSFVector.cpp
similarity index 98%
rename from gtsam_unstable/base/DSFVector.cpp
rename to gtsam/base/DSFVector.cpp
index cdea89f34..6d79dcb53 100644
--- a/gtsam_unstable/base/DSFVector.cpp
+++ b/gtsam/base/DSFVector.cpp
@@ -18,7 +18,7 @@
#include
#include
-#include <gtsam_unstable/base/DSFVector.h>
+#include <gtsam/base/DSFVector.h>
using namespace std;
diff --git a/gtsam_unstable/base/DSFVector.h b/gtsam/base/DSFVector.h
similarity index 100%
rename from gtsam_unstable/base/DSFVector.h
rename to gtsam/base/DSFVector.h
diff --git a/gtsam/base/Matrix.h b/gtsam/base/Matrix.h
index 6902a4a88..e6a40f727 100644
--- a/gtsam/base/Matrix.h
+++ b/gtsam/base/Matrix.h
@@ -67,11 +67,30 @@ inline Matrix Matrix_(const Vector& v) { return Matrix_(v.size(),1,v);}
*/
Matrix Matrix_(size_t m, size_t n, ...);
+// Matlab-like syntax
+
/**
- * MATLAB like constructors
+ * Creates a zeros matrix, with matlab-like syntax
+ *
+ * Note: if assigning a block (created from an Eigen block() function) of a matrix to zeros,
+ * don't use this function, instead use ".setZero(m,n)" to avoid an Eigen error.
*/
Matrix zeros(size_t m, size_t n);
+
+/**
+ * Creates an identity matrix, with matlab-like syntax
+ *
+ * Note: if assigning a block (created from an Eigen block() function) of a matrix to identity,
+ * don't use this function, instead use ".setIdentity(m,n)" to avoid an Eigen error.
+ */
Matrix eye(size_t m, size_t n);
+
+/**
+ * Creates a square identity matrix, with matlab-like syntax
+ *
+ * Note: if assigning a block (created from an Eigen block() function) of a matrix to identity,
+ * don't use this function, instead use ".setIdentity(m)" to avoid an Eigen error.
+ */
inline Matrix eye( size_t m ) { return eye(m,m); }
Matrix diag(const Vector& v);
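A short sketch of the caveat the new comments document: when the destination is an Eigen block expression, prefer the in-place setters over assigning the result of zeros()/eye().

    #include <gtsam/base/Matrix.h>
    using namespace gtsam;

    void blockAssignmentExample() {
      Matrix M = zeros(5, 5);            // fine: constructing a whole matrix
      M.block(1, 1, 3, 3).setIdentity(); // preferred when writing into a block
      M.block(0, 0, 2, 2).setZero();     // likewise for zeroing a block
      // M.block(1, 1, 3, 3) = eye(3);   // the pattern the comments warn against
    }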
diff --git a/gtsam_unstable/base/tests/testDSFVector.cpp b/gtsam/base/tests/testDSFVector.cpp
similarity index 99%
rename from gtsam_unstable/base/tests/testDSFVector.cpp
rename to gtsam/base/tests/testDSFVector.cpp
index 8997559f5..c0b72f1a0 100644
--- a/gtsam_unstable/base/tests/testDSFVector.cpp
+++ b/gtsam/base/tests/testDSFVector.cpp
@@ -24,7 +24,7 @@
using namespace boost::assign;
#include
-#include <gtsam_unstable/base/DSFVector.h>
+#include <gtsam/base/DSFVector.h>
using namespace std;
using namespace gtsam;
diff --git a/gtsam/inference/FactorGraph-inl.h b/gtsam/inference/FactorGraph-inl.h
index 97a7fa1c6..04490dbc6 100644
--- a/gtsam/inference/FactorGraph-inl.h
+++ b/gtsam/inference/FactorGraph-inl.h
@@ -100,7 +100,7 @@ namespace gtsam {
++ firstIndex;
// Check that number of variables is in bounds
- if(firstIndex + nFrontals >= variableIndex.size())
+ if(firstIndex + nFrontals > variableIndex.size())
throw std::invalid_argument("Requested to eliminate more frontal variables than exist in the factor graph.");
// Get set of involved factors
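The boundary case behind the `>=` to `>` change: eliminating every variable in the graph is legal, and in that case firstIndex + nFrontals equals variableIndex.size() exactly, so only strictly-greater requests are out of range. A sketch (include path assumed) of the case the old check wrongly rejected:

    #include <gtsam/inference/VariableIndex.h> // assumed path
    #include <cassert>
    using namespace gtsam;

    void eliminateAllBoundaryCase(const VariableIndex& variableIndex) {
      const size_t firstIndex = 0;
      const size_t nFrontals = variableIndex.size(); // request all variables
      // Old check (>=) threw here; new check (>) correctly accepts.
      assert(!(firstIndex + nFrontals > variableIndex.size()));
    }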
diff --git a/gtsam/inference/tests/testFactorGraph.cpp b/gtsam/inference/tests/testFactorGraph.cpp
index b2f9bc3f1..bfbe716d4 100644
--- a/gtsam/inference/tests/testFactorGraph.cpp
+++ b/gtsam/inference/tests/testFactorGraph.cpp
@@ -60,65 +60,6 @@ TEST(FactorGraph, eliminateFrontals) {
EXPECT(assert_equal(expectedCond, *actualCond));
}
-///* ************************************************************************* */
-// SL-FIX TEST( FactorGraph, splitMinimumSpanningTree )
-//{
-// SymbolicFactorGraph G;
-// G.push_factor("x1", "x2");
-// G.push_factor("x1", "x3");
-// G.push_factor("x1", "x4");
-// G.push_factor("x2", "x3");
-// G.push_factor("x2", "x4");
-// G.push_factor("x3", "x4");
-//
-// SymbolicFactorGraph T, C;
-// boost::tie(T, C) = G.splitMinimumSpanningTree();
-//
-// SymbolicFactorGraph expectedT, expectedC;
-// expectedT.push_factor("x1", "x2");
-// expectedT.push_factor("x1", "x3");
-// expectedT.push_factor("x1", "x4");
-// expectedC.push_factor("x2", "x3");
-// expectedC.push_factor("x2", "x4");
-// expectedC.push_factor("x3", "x4");
-// CHECK(assert_equal(expectedT,T));
-// CHECK(assert_equal(expectedC,C));
-//}
-
-///* ************************************************************************* */
-///**
-// * x1 - x2 - x3 - x4 - x5
-// * | | / |
-// * l1 l2 l3
-// */
-// SL-FIX TEST( FactorGraph, removeSingletons )
-//{
-// SymbolicFactorGraph G;
-// G.push_factor("x1", "x2");
-// G.push_factor("x2", "x3");
-// G.push_factor("x3", "x4");
-// G.push_factor("x4", "x5");
-// G.push_factor("x2", "l1");
-// G.push_factor("x3", "l2");
-// G.push_factor("x4", "l2");
-// G.push_factor("x4", "l3");
-//
-// SymbolicFactorGraph singletonGraph;
-// set singletons;
-// boost::tie(singletonGraph, singletons) = G.removeSingletons();
-//
-// set singletons_excepted; singletons_excepted += "x1", "x2", "x5", "l1", "l3";
-// CHECK(singletons_excepted == singletons);
-//
-// SymbolicFactorGraph singletonGraph_excepted;
-// singletonGraph_excepted.push_factor("x2", "l1");
-// singletonGraph_excepted.push_factor("x4", "l3");
-// singletonGraph_excepted.push_factor("x1", "x2");
-// singletonGraph_excepted.push_factor("x4", "x5");
-// singletonGraph_excepted.push_factor("x2", "x3");
-// CHECK(singletonGraph_excepted.equals(singletonGraph));
-//}
-
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */
diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index cf1d31d85..a89d7a10c 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -20,9 +20,7 @@ namespace gtsam {
*/
class ConjugateGradientParameters : public IterativeOptimizationParameters {
-
public:
-
typedef IterativeOptimizationParameters Base;
typedef boost::shared_ptr<ConjugateGradientParameters> shared_ptr;
@@ -49,7 +47,21 @@ public:
inline double epsilon_rel() const { return epsilon_rel_; }
inline double epsilon_abs() const { return epsilon_abs_; }
- void print() const {
+ inline size_t getMinIterations() const { return minIterations_; }
+ inline size_t getMaxIterations() const { return maxIterations_; }
+ inline size_t getReset() const { return reset_; }
+ inline double getEpsilon() const { return epsilon_rel_; }
+ inline double getEpsilon_rel() const { return epsilon_rel_; }
+ inline double getEpsilon_abs() const { return epsilon_abs_; }
+
+ inline void setMinIterations(size_t value) { minIterations_ = value; }
+ inline void setMaxIterations(size_t value) { maxIterations_ = value; }
+ inline void setReset(size_t value) { reset_ = value; }
+ inline void setEpsilon(double value) { epsilon_rel_ = value; }
+ inline void setEpsilon_rel(double value) { epsilon_rel_ = value; }
+ inline void setEpsilon_abs(double value) { epsilon_abs_ = value; }
+
+ virtual void print(const std::string &s="") const {
Base::print();
std::cout << "ConjugateGradientParameters: "
<< "minIter = " << minIterations_
@@ -61,18 +73,4 @@ public:
}
};
-//class ConjugateGradientSolver : public IterativeSolver {
-//
-//public:
-//
-// typedef ConjugateGradientParameters Parameters;
-//
-// Parameters parameters_;
-//
-// ConjugateGradientSolver(const ConjugateGradientParameters ¶meters) : parameters_(parameters) {}
-// virtual VectorValues::shared_ptr optimize () = 0;
-// virtual const IterativeOptimizationParameters& _params() const = 0;
-//};
-
-
}
diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp
index 0ddc4a7b3..5182bb914 100644
--- a/gtsam/linear/GaussianBayesNet.cpp
+++ b/gtsam/linear/GaussianBayesNet.cpp
@@ -16,7 +16,7 @@
*/
#include <gtsam/linear/GaussianBayesNet.h>
-#include <gtsam/linear/JacobianFactorGraph.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
#include
#include
@@ -242,12 +242,12 @@ double determinant(const GaussianBayesNet& bayesNet) {
/* ************************************************************************* */
VectorValues gradient(const GaussianBayesNet& bayesNet, const VectorValues& x0) {
-  return gradient(FactorGraph<JacobianFactor>(bayesNet), x0);
+ return gradient(GaussianFactorGraph(bayesNet), x0);
}
/* ************************************************************************* */
void gradientAtZero(const GaussianBayesNet& bayesNet, VectorValues& g) {
-  gradientAtZero(FactorGraph<JacobianFactor>(bayesNet), g);
+ gradientAtZero(GaussianFactorGraph(bayesNet), g);
}
/* ************************************************************************* */
diff --git a/gtsam/linear/GaussianBayesTree.cpp b/gtsam/linear/GaussianBayesTree.cpp
index f9ea3fdc8..04cb4a25a 100644
--- a/gtsam/linear/GaussianBayesTree.cpp
+++ b/gtsam/linear/GaussianBayesTree.cpp
@@ -18,7 +18,7 @@
*/
#include <gtsam/linear/GaussianBayesTree.h>
-#include <gtsam/linear/JacobianFactorGraph.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
namespace gtsam {
diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp
index 02f37915c..222f025a0 100644
--- a/gtsam/linear/GaussianFactorGraph.cpp
+++ b/gtsam/linear/GaussianFactorGraph.cpp
@@ -166,7 +166,7 @@ namespace gtsam {
}
/* ************************************************************************* */
- Matrix GaussianFactorGraph::denseJacobian() const {
+ Matrix GaussianFactorGraph::augmentedJacobian() const {
// Convert to Jacobians
FactorGraph<JacobianFactor> jfg;
jfg.reserve(this->size());
@@ -182,6 +182,14 @@ namespace gtsam {
return combined.matrix_augmented();
}
+ /* ************************************************************************* */
+  std::pair<Matrix, Vector> GaussianFactorGraph::jacobian() const {
+ Matrix augmented = augmentedJacobian();
+ return make_pair(
+ augmented.leftCols(augmented.cols()-1),
+ augmented.col(augmented.cols()-1));
+ }
+
/* ************************************************************************* */
// Helper functions for Combine
static boost::tuple<vector<size_t>, size_t, size_t> countDims(const FactorGraph<JacobianFactor>& factors, const VariableSlots& variableSlots) {
@@ -317,9 +325,7 @@ break;
}
/* ************************************************************************* */
-  static
-  FastMap<Index, SlotEntry> findScatterAndDims
-  (const FactorGraph<GaussianFactor>& factors) {
+  static FastMap<Index, SlotEntry> findScatterAndDims(const FactorGraph<GaussianFactor>& factors) {
const bool debug = ISDEBUG("findScatterAndDims");
@@ -349,7 +355,7 @@ break;
}
/* ************************************************************************* */
- Matrix GaussianFactorGraph::denseHessian() const {
+ Matrix GaussianFactorGraph::augmentedHessian() const {
Scatter scatter = findScatterAndDims(*this);
@@ -367,6 +373,14 @@ break;
return result;
}
+ /* ************************************************************************* */
+  std::pair<Matrix, Vector> GaussianFactorGraph::hessian() const {
+ Matrix augmented = augmentedHessian();
+ return make_pair(
+ augmented.topLeftCorner(augmented.rows()-1, augmented.rows()-1),
+ augmented.col(augmented.rows()-1).head(augmented.rows()-1));
+ }
+
/* ************************************************************************* */
GaussianFactorGraph::EliminationResult EliminateCholesky(const FactorGraph<
GaussianFactor>& factors, size_t nrFrontals) {
@@ -507,4 +521,126 @@ break;
} // \EliminatePreferCholesky
+
+
+ /* ************************************************************************* */
+ static JacobianFactor::shared_ptr convertToJacobianFactorPtr(const GaussianFactor::shared_ptr &gf) {
+    JacobianFactor::shared_ptr result = boost::dynamic_pointer_cast<JacobianFactor>(gf);
+ if( !result ) {
+      result = boost::make_shared<JacobianFactor>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
+ }
+ return result;
+ }
+
+ /* ************************************************************************* */
+ Errors operator*(const GaussianFactorGraph& fg, const VectorValues& x) {
+ Errors e;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ e.push_back((*Ai)*x);
+ }
+ return e;
+ }
+
+ /* ************************************************************************* */
+ void multiplyInPlace(const GaussianFactorGraph& fg, const VectorValues& x, Errors& e) {
+ multiplyInPlace(fg,x,e.begin());
+ }
+
+ /* ************************************************************************* */
+ void multiplyInPlace(const GaussianFactorGraph& fg, const VectorValues& x, const Errors::iterator& e) {
+ Errors::iterator ei = e;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ *ei = (*Ai)*x;
+ ei++;
+ }
+ }
+
+ /* ************************************************************************* */
+ // x += alpha*A'*e
+ void transposeMultiplyAdd(const GaussianFactorGraph& fg, double alpha, const Errors& e, VectorValues& x) {
+ // For each factor add the gradient contribution
+ Errors::const_iterator ei = e.begin();
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ Ai->transposeMultiplyAdd(alpha,*(ei++),x);
+ }
+ }
+
+ /* ************************************************************************* */
+ VectorValues gradient(const GaussianFactorGraph& fg, const VectorValues& x0) {
+ // It is crucial for performance to make a zero-valued clone of x
+ VectorValues g = VectorValues::Zero(x0);
+ Errors e;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ e.push_back(Ai->error_vector(x0));
+ }
+ transposeMultiplyAdd(fg, 1.0, e, g);
+ return g;
+ }
+
+ /* ************************************************************************* */
+ void gradientAtZero(const GaussianFactorGraph& fg, VectorValues& g) {
+ // Zero-out the gradient
+ g.setZero();
+ Errors e;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ e.push_back(-Ai->getb());
+ }
+ transposeMultiplyAdd(fg, 1.0, e, g);
+ }
+
+ /* ************************************************************************* */
+ void residual(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r) {
+ Index i = 0 ;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ r[i] = Ai->getb();
+ i++;
+ }
+ VectorValues Ax = VectorValues::SameStructure(r);
+ multiply(fg,x,Ax);
+ axpy(-1.0,Ax,r);
+ }
+
+ /* ************************************************************************* */
+ void multiply(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r) {
+ r.vector() = Vector::Zero(r.dim());
+ Index i = 0;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ SubVector &y = r[i];
+ for(JacobianFactor::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
+ y += Ai->getA(j) * x[*j];
+ }
+ ++i;
+ }
+ }
+
+ /* ************************************************************************* */
+ void transposeMultiply(const GaussianFactorGraph& fg, const VectorValues &r, VectorValues &x) {
+ x.vector() = Vector::Zero(x.dim());
+ Index i = 0;
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ for(JacobianFactor::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
+ x[*j] += Ai->getA(j).transpose() * r[i];
+ }
+ ++i;
+ }
+ }
+
+ /* ************************************************************************* */
+  boost::shared_ptr<Errors> gaussianErrors_(const GaussianFactorGraph& fg, const VectorValues& x) {
+    boost::shared_ptr<Errors> e(new Errors);
+ BOOST_FOREACH(const GaussianFactor::shared_ptr& Ai_G, fg) {
+ JacobianFactor::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
+ e->push_back(Ai->error_vector(x));
+ }
+ return e;
+ }
+
} // namespace gtsam
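A sketch relating the new accessors: for a graph encoding [A b], hessian() should agree with the Gram form of jacobian(). The function name and tolerance below are illustrative.

    #include <gtsam/linear/GaussianFactorGraph.h>
    #include <boost/tuple/tuple.hpp>
    #include <cassert>
    using namespace gtsam;

    void checkJacobianHessianConsistency(const GaussianFactorGraph& fg) {
      Matrix A, L; Vector b, eta;
      boost::tie(A, b) = fg.jacobian();  // split of augmentedJacobian()
      boost::tie(L, eta) = fg.hessian(); // split of augmentedHessian()
      // Up to numerical error, L == A'A and eta == A'b.
      assert(L.isApprox(A.transpose() * A, 1e-9));
      assert(eta.isApprox(A.transpose() * b, 1e-9));
    }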
diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h
index 51748b79b..5e6e792be 100644
--- a/gtsam/linear/GaussianFactorGraph.h
+++ b/gtsam/linear/GaussianFactorGraph.h
@@ -31,24 +31,6 @@
namespace gtsam {
- /** return A*x-b
- * \todo Make this a member function - affects SubgraphPreconditioner */
-  template<class FACTOR>
-  Errors gaussianErrors(const FactorGraph<FACTOR>& fg, const VectorValues& x) {
- return *gaussianErrors_(fg, x);
- }
-
- /** shared pointer version
- * \todo Make this a member function - affects SubgraphPreconditioner */
-  template<class FACTOR>
-  boost::shared_ptr<Errors> gaussianErrors_(const FactorGraph<FACTOR>& fg, const VectorValues& x) {
-    boost::shared_ptr<Errors> e(new Errors);
-    BOOST_FOREACH(const typename boost::shared_ptr<FACTOR>& factor, fg) {
- e->push_back(factor->error_vector(x));
- }
- return e;
- }
-
/**
* A Linear Factor Graph is a factor graph where all factors are Gaussian, i.e.
* Factor == GaussianFactor
@@ -188,15 +170,43 @@ namespace gtsam {
Matrix sparseJacobian_() const;
/**
- * Return a dense \f$ m \times n \f$ Jacobian matrix, augmented with b
- * with standard deviations are baked into A and b
+   * Return a dense \f$ [ \;A\;b\; ] \in \mathbb{R}^{m \times (n+1)} \f$
+   * Jacobian matrix, augmented with b, with the noise models baked
+ * into A and b. The negative log-likelihood is
+ * \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
+ * GaussianFactorGraph::jacobian and GaussianFactorGraph::sparseJacobian.
*/
- Matrix denseJacobian() const;
+ Matrix augmentedJacobian() const;
+
+ /**
+ * Return the dense Jacobian \f$ A \f$ and right-hand-side \f$ b \f$,
+ * with the noise models baked into A and b. The negative log-likelihood
+ * is \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
+ * GaussianFactorGraph::augmentedJacobian and
+ * GaussianFactorGraph::sparseJacobian.
+ */
+ std::pair jacobian() const;
/**
- * Return a dense \f$ n \times n \f$ Hessian matrix, augmented with \f$ A^T b \f$
+   * Return a dense \f$ \Lambda \in \mathbb{R}^{(n+1) \times (n+1)} \f$ Hessian
+ * matrix, augmented with the information vector \f$ \eta \f$. The
+ * augmented Hessian is
+ \f[ \left[ \begin{array}{ccc}
+ \Lambda & \eta \\
+ \eta^T & c
+ \end{array} \right] \f]
+ and the negative log-likelihood is
+ \f$ \frac{1}{2} x^T \Lambda x + \eta^T x + c \f$.
*/
- Matrix denseHessian() const;
+ Matrix augmentedHessian() const;
+
+ /**
+ * Return the dense Hessian \f$ \Lambda \f$ and information vector
+ * \f$ \eta \f$, with the noise models baked in. The negative log-likelihood
+   * is \f$ \frac{1}{2} x^T \Lambda x + \eta^T x + c \f$. See also
+ * GaussianFactorGraph::augmentedHessian.
+ */
+ std::pair hessian() const;
private:
/** Serialization function */
@@ -294,4 +304,54 @@ namespace gtsam {
GaussianFactorGraph::EliminationResult EliminateCholesky(const FactorGraph<
GaussianFactor>& factors, size_t nrFrontals = 1);
+  /****** Linear Algebra Operations ******/
+
+ /** return A*x */
+ Errors operator*(const GaussianFactorGraph& fg, const VectorValues& x);
+
+ /** In-place version e <- A*x that overwrites e. */
+ void multiplyInPlace(const GaussianFactorGraph& fg, const VectorValues& x, Errors& e);
+
+ /** In-place version e <- A*x that takes an iterator. */
+ void multiplyInPlace(const GaussianFactorGraph& fg, const VectorValues& x, const Errors::iterator& e);
+
+ /** x += alpha*A'*e */
+ void transposeMultiplyAdd(const GaussianFactorGraph& fg, double alpha, const Errors& e, VectorValues& x);
+
+ /**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
+ * centered around \f$ x = x_0 \f$.
+ * The gradient is \f$ A^T(Ax-b) \f$.
+ * @param fg The Jacobian factor graph $(A,b)$
+ * @param x0 The center about which to compute the gradient
+ * @return The gradient as a VectorValues
+ */
+ VectorValues gradient(const GaussianFactorGraph& fg, const VectorValues& x0);
+
+ /**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
+ * centered around zero.
+ * The gradient is \f$ A^T(Ax-b) \f$.
+ * @param fg The Jacobian factor graph $(A,b)$
+ * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
+ * @return The gradient as a VectorValues
+ */
+ void gradientAtZero(const GaussianFactorGraph& fg, VectorValues& g);
+
+ /* matrix-vector operations */
+ void residual(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r);
+ void multiply(const GaussianFactorGraph& fg, const VectorValues &x, VectorValues &r);
+ void transposeMultiply(const GaussianFactorGraph& fg, const VectorValues &r, VectorValues &x);
+
+ /** shared pointer version
+ * \todo Make this a member function - affects SubgraphPreconditioner */
+  boost::shared_ptr<Errors> gaussianErrors_(const GaussianFactorGraph& fg, const VectorValues& x);
+
+ /** return A*x-b
+ * \todo Make this a member function - affects SubgraphPreconditioner */
+ inline Errors gaussianErrors(const GaussianFactorGraph& fg, const VectorValues& x) {
+ return *gaussianErrors_(fg, x); }
+
} // namespace gtsam
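These free functions support matrix-free iterations; a sketch of one gradient-descent step built from them (the step size and function name are illustrative):

    #include <gtsam/linear/GaussianFactorGraph.h>
    using namespace gtsam;

    // x1 = x0 - alpha * A'(A x0 - b), using only the operations declared above.
    VectorValues gradientStep(const GaussianFactorGraph& fg,
                              const VectorValues& x0, double alpha) {
      VectorValues g = gradient(fg, x0); // g = A'(A x0 - b)
      VectorValues x = x0;
      axpy(-alpha, g, x);                // x += -alpha * g
      return x;
    }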
diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp
index 6db6330b0..e152738fb 100644
--- a/gtsam/linear/HessianFactor.cpp
+++ b/gtsam/linear/HessianFactor.cpp
@@ -261,12 +261,10 @@ HessianFactor::HessianFactor(const FactorGraph<GaussianFactor>& factors,
if (debug) cout << "Combining " << factors.size() << " factors" << endl;
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors)
{
-    shared_ptr hessian(boost::dynamic_pointer_cast<HessianFactor>(factor));
- if (hessian)
- updateATA(*hessian, scatter);
- else {
-      JacobianFactor::shared_ptr jacobianFactor(boost::dynamic_pointer_cast<JacobianFactor>(factor));
- if (jacobianFactor)
+ if(factor) {
+      if(shared_ptr hessian = boost::dynamic_pointer_cast<HessianFactor>(factor))
+ updateATA(*hessian, scatter);
+      else if(JacobianFactor::shared_ptr jacobianFactor = boost::dynamic_pointer_cast<JacobianFactor>(factor))
updateATA(*jacobianFactor, scatter);
else
throw invalid_argument("GaussianFactor is neither Hessian nor Jacobian");
@@ -309,16 +307,6 @@ bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
}
}
-/* ************************************************************************* */
-double HessianFactor::constantTerm() const {
- return info_(this->size(), this->size())(0,0);
-}
-
-/* ************************************************************************* */
-HessianFactor::constColumn HessianFactor::linearTerm() const {
- return info_.rangeColumn(0, this->size(), this->size(), 0);
-}
-
/* ************************************************************************* */
Matrix HessianFactor::computeInformation() const {
return info_.full().selfadjointView<Eigen::Upper>();
@@ -360,7 +348,6 @@ void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatte
// Apply updates to the upper triangle
tic(3, "update");
- assert(this->info_.nBlocks() - 1 == scatter.size());
for(size_t j2=0; j2<update.size()+1; ++j2) {
size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
for(size_t j1=0; j1<=j2; ++j1) {
@@ -437,7 +424,6 @@ void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatt
// Apply updates to the upper triangle
tic(3, "update");
- assert(this->info_.nBlocks() - 1 == scatter.size());
for(size_t j2=0; j2<update.size()+1; ++j2) {
size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
for(size_t j1=0; j1<=j2; ++j1) {
diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h
index 74216f0f0..9e4ed2074 100644
--- a/gtsam/linear/HessianFactor.h
+++ b/gtsam/linear/HessianFactor.h
@@ -120,20 +120,6 @@ namespace gtsam {
InfoMatrix matrix_; ///< The full augmented information matrix, s.t. the quadratic error is 0.5*[x -1]'*H*[x -1]
BlockInfo info_; ///< The block view of the full information matrix.
- /** Update the factor by adding the information from the JacobianFactor
- * (used internally during elimination).
- * @param update The JacobianFactor containing the new information to add
- * @param scatter A mapping from variable index to slot index in this HessianFactor
- */
- void updateATA(const JacobianFactor& update, const Scatter& scatter);
-
- /** Update the factor by adding the information from the HessianFactor
- * (used internally during elimination).
- * @param update The HessianFactor containing the new information to add
- * @param scatter A mapping from variable index to slot index in this HessianFactor
- */
- void updateATA(const HessianFactor& update, const Scatter& scatter);
-
public:
typedef boost::shared_ptr<HessianFactor> shared_ptr; ///< A shared_ptr to this
@@ -256,16 +242,39 @@ namespace gtsam {
*/
constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); }
+ /** Return a view of the block at (j1,j2) of the upper-triangular part of the
+ * information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
+ * above to explain that only the upper-triangular part of the information matrix is stored
+ * and returned by this function.
+ * @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
+ * use, for example, begin() + 2 to get the 3rd variable in this factor.
+ * @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
+ * use, for example, begin() + 2 to get the 3rd variable in this factor.
+ * @return A view of the requested block, not a copy.
+ */
+ Block info(iterator j1, iterator j2) { return info_(j1-begin(), j2-begin()); }
+
/** Return the upper-triangular part of the full *augmented* information matrix,
* as described above. See HessianFactor class documentation above to explain that only the
* upper-triangular part of the information matrix is stored and returned by this function.
*/
constBlock info() const { return info_.full(); }
+ /** Return the upper-triangular part of the full *augmented* information matrix,
+ * as described above. See HessianFactor class documentation above to explain that only the
+ * upper-triangular part of the information matrix is stored and returned by this function.
+ */
+ Block info() { return info_.full(); }
+
/** Return the constant term \f$ f \f$ as described above
* @return The constant term \f$ f \f$
*/
- double constantTerm() const;
+ double constantTerm() const { return info_(this->size(), this->size())(0,0); }
+
+ /** Return the constant term \f$ f \f$ as described above
+ * @return The constant term \f$ f \f$
+ */
+ double& constantTerm() { return info_(this->size(), this->size())(0,0); }
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
@@ -273,9 +282,19 @@ namespace gtsam {
* @return The linear term \f$ g \f$ */
constColumn linearTerm(const_iterator j) const { return info_.column(j-begin(), size(), 0); }
+ /** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
+ * @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
+ * use, for example, begin() + 2 to get the 3rd variable in this factor.
+ * @return The linear term \f$ g \f$ */
+ Column linearTerm(iterator j) { return info_.column(j-begin(), size(), 0); }
+
/** Return the complete linear term \f$ g \f$ as described above.
* @return The linear term \f$ g \f$ */
- constColumn linearTerm() const;
+ constColumn linearTerm() const { return info_.rangeColumn(0, this->size(), this->size(), 0); }
+
+ /** Return the complete linear term \f$ g \f$ as described above.
+ * @return The linear term \f$ g \f$ */
+ Column linearTerm() { return info_.rangeColumn(0, this->size(), this->size(), 0); }
/** Return the augmented information matrix represented by this GaussianFactor.
* The augmented information matrix contains the information matrix with an
@@ -315,6 +334,20 @@ namespace gtsam {
/** split partially eliminated factor */
boost::shared_ptr<GaussianConditional> splitEliminatedFactor(size_t nrFrontals);
+ /** Update the factor by adding the information from the JacobianFactor
+ * (used internally during elimination).
+ * @param update The JacobianFactor containing the new information to add
+ * @param scatter A mapping from variable index to slot index in this HessianFactor
+ */
+ void updateATA(const JacobianFactor& update, const Scatter& scatter);
+
+ /** Update the factor by adding the information from the HessianFactor
+ * (used internally during elimination).
+ * @param update The HessianFactor containing the new information to add
+ * @param scatter A mapping from variable index to slot index in this HessianFactor
+ */
+ void updateATA(const HessianFactor& update, const Scatter& scatter);
+
/** assert invariants */
void assertInvariants() const;
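A sketch of what the new non-const accessors permit; previously constantTerm() and linearTerm() were read-only. The adjustment itself is arbitrary and only illustrates in-place mutation.

    #include <gtsam/linear/HessianFactor.h>
    using namespace gtsam;

    void adjustInPlace(HessianFactor& hf) {
      hf.constantTerm() += 1.0; // writable reference to the constant term f
      hf.linearTerm() *= 2.0;   // writable column view of the linear term g
    }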
diff --git a/gtsam/linear/IterativeSolver.cpp b/gtsam/linear/IterativeSolver.cpp
new file mode 100644
index 000000000..2bba3e12b
--- /dev/null
+++ b/gtsam/linear/IterativeSolver.cpp
@@ -0,0 +1,50 @@
+/**
+ * @file IterativeSolver.cpp
+ * @brief
+ * @date Sep 3, 2012
+ * @author Yong-Dian Jian
+ */
+
+#include <gtsam/linear/IterativeSolver.h>
+#include <boost/algorithm/string.hpp>
+#include <string>
+
+namespace gtsam {
+
+std::string IterativeOptimizationParameters::getKernel() const { return kernelTranslator(kernel_); }
+std::string IterativeOptimizationParameters::getVerbosity() const { return verbosityTranslator(verbosity_); }
+void IterativeOptimizationParameters::setKernel(const std::string &src) { kernel_ = kernelTranslator(src); }
+void IterativeOptimizationParameters::setVerbosity(const std::string &src) { verbosity_ = verbosityTranslator(src); }
+
+IterativeOptimizationParameters::Kernel IterativeOptimizationParameters::kernelTranslator(const std::string &src) {
+ std::string s = src; boost::algorithm::to_upper(s);
+ if (s == "CG") return IterativeOptimizationParameters::CG;
+ /* default is cg */
+ else return IterativeOptimizationParameters::CG;
+}
+
+IterativeOptimizationParameters::Verbosity IterativeOptimizationParameters::verbosityTranslator(const std::string &src) {
+ std::string s = src; boost::algorithm::to_upper(s);
+ if (s == "SILENT") return IterativeOptimizationParameters::SILENT;
+ else if (s == "COMPLEXITY") return IterativeOptimizationParameters::COMPLEXITY;
+ else if (s == "ERROR") return IterativeOptimizationParameters::ERROR;
+  /* default is SILENT */
+ else return IterativeOptimizationParameters::SILENT;
+}
+
+std::string IterativeOptimizationParameters::kernelTranslator(IterativeOptimizationParameters::Kernel k) {
+ if ( k == CG ) return "CG";
+ else return "UNKNOWN";
+}
+
+std::string IterativeOptimizationParameters::verbosityTranslator(IterativeOptimizationParameters::Verbosity verbosity) {
+ if (verbosity == SILENT) return "SILENT";
+ else if (verbosity == COMPLEXITY) return "COMPLEXITY";
+ else if (verbosity == ERROR) return "ERROR";
+ else return "UNKNOWN";
+}
+
+
+}
+
+
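The translators give the MATLAB wrapper case-insensitive, string-based access to the enums; a C++ sketch using the concrete ConjugateGradientParameters subclass (include path assumed):

    #include <gtsam/linear/ConjugateGradientSolver.h> // assumed path
    #include <iostream>
    using namespace gtsam;

    void verbosityRoundTrip() {
      ConjugateGradientParameters parameters;
      parameters.setVerbosity("error"); // lower case is fine: to_upper is applied
      parameters.setKernel("cg");
      std::cout << parameters.getVerbosity() << std::endl; // prints "ERROR"
    }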
diff --git a/gtsam/linear/IterativeSolver.h b/gtsam/linear/IterativeSolver.h
index 8292b3831..7946874bb 100644
--- a/gtsam/linear/IterativeSolver.h
+++ b/gtsam/linear/IterativeSolver.h
@@ -13,6 +13,7 @@
#include
#include
+#include <string>
namespace gtsam {
@@ -41,23 +42,34 @@ namespace gtsam {
inline Kernel kernel() const { return kernel_; }
inline Verbosity verbosity() const { return verbosity_; }
+ /* matlab interface */
+ std::string getKernel() const ;
+ std::string getVerbosity() const;
+ void setKernel(const std::string &s) ;
+ void setVerbosity(const std::string &s) ;
+
void print() const {
- const std::string kernelStr[1] = {"cg"};
std::cout << "IterativeOptimizationParameters: "
- << "kernel = " << kernelStr[kernel_]
- << ", verbosity = " << verbosity_ << std::endl;
+ << "kernel = " << kernelTranslator(kernel_)
+ << ", verbosity = " << verbosityTranslator(verbosity_) << std::endl;
}
+
+ static Kernel kernelTranslator(const std::string &s);
+ static Verbosity verbosityTranslator(const std::string &s);
+ static std::string kernelTranslator(Kernel k);
+ static std::string verbosityTranslator(Verbosity v);
};
class IterativeSolver {
public:
typedef boost::shared_ptr<IterativeSolver> shared_ptr;
- IterativeSolver(){}
+ IterativeSolver() {}
virtual ~IterativeSolver() {}
+ /* interface to the nonlinear optimizer */
+ virtual VectorValues optimize () = 0;
/* update interface to the nonlinear optimizer */
virtual void replaceFactors(const GaussianFactorGraph::shared_ptr &factorGraph, const double lambda) {}
- virtual VectorValues optimize () = 0;
};
}
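With optimize() now the documented entry point, a hypothetical minimal subclass illustrates the interface; this class is not part of the library.

    #include <gtsam/linear/IterativeSolver.h>
    using namespace gtsam;

    // A do-nothing solver that just returns its initial values.
    class IdentitySolver : public IterativeSolver {
      VectorValues x0_;
    public:
      IdentitySolver(const VectorValues& x0) : x0_(x0) {}
      virtual VectorValues optimize() { return x0_; }
    };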
diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h
index def36b086..f2bd9c75d 100644
--- a/gtsam/linear/JacobianFactor.h
+++ b/gtsam/linear/JacobianFactor.h
@@ -322,7 +322,6 @@ namespace gtsam {
ar & BOOST_SERIALIZATION_NVP(model_);
ar & BOOST_SERIALIZATION_NVP(matrix_);
}
-
}; // JacobianFactor
} // gtsam
diff --git a/gtsam/linear/JacobianFactorGraph.cpp b/gtsam/linear/JacobianFactorGraph.cpp
deleted file mode 100644
index 0ca3b8667..000000000
--- a/gtsam/linear/JacobianFactorGraph.cpp
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * @file JacobianFactorGraph.h
- * @date Jun 6, 2012
- * @author Yong Dian Jian
- */
-
-#include <gtsam/linear/JacobianFactorGraph.h>
-#include <boost/foreach.hpp>
-
-namespace gtsam {
-
-/* ************************************************************************* */
-Errors operator*(const JacobianFactorGraph& fg, const VectorValues& x) {
- Errors e;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) {
- e.push_back((*Ai)*x);
- }
- return e;
-}
-
-/* ************************************************************************* */
-void multiplyInPlace(const JacobianFactorGraph& fg, const VectorValues& x, Errors& e) {
- multiplyInPlace(fg,x,e.begin());
-}
-
-/* ************************************************************************* */
-void multiplyInPlace(const JacobianFactorGraph& fg, const VectorValues& x, const Errors::iterator& e) {
- Errors::iterator ei = e;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) {
- *ei = (*Ai)*x;
- ei++;
- }
-}
-
-
-/* ************************************************************************* */
-// x += alpha*A'*e
-void transposeMultiplyAdd(const JacobianFactorGraph& fg, double alpha, const Errors& e, VectorValues& x) {
- // For each factor add the gradient contribution
- Errors::const_iterator ei = e.begin();
- BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) {
- Ai->transposeMultiplyAdd(alpha,*(ei++),x);
- }
-}
-
-/* ************************************************************************* */
-VectorValues gradient(const JacobianFactorGraph& fg, const VectorValues& x0) {
- // It is crucial for performance to make a zero-valued clone of x
- VectorValues g = VectorValues::Zero(x0);
- Errors e;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
- e.push_back(factor->error_vector(x0));
- }
- transposeMultiplyAdd(fg, 1.0, e, g);
- return g;
-}
-
-/* ************************************************************************* */
-void gradientAtZero(const JacobianFactorGraph& fg, VectorValues& g) {
- // Zero-out the gradient
- g.setZero();
- Errors e;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
- e.push_back(-factor->getb());
- }
- transposeMultiplyAdd(fg, 1.0, e, g);
-}
-
-/* ************************************************************************* */
-void residual(const JacobianFactorGraph& fg, const VectorValues &x, VectorValues &r) {
- Index i = 0 ;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
- r[i] = factor->getb();
- i++;
- }
- VectorValues Ax = VectorValues::SameStructure(r);
- multiply(fg,x,Ax);
- axpy(-1.0,Ax,r);
-}
-
-/* ************************************************************************* */
-void multiply(const JacobianFactorGraph& fg, const VectorValues &x, VectorValues &r) {
- r.vector() = Vector::Zero(r.dim());
- Index i = 0;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
- SubVector &y = r[i];
- for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
- y += factor->getA(j) * x[*j];
- }
- ++i;
- }
-}
-
-/* ************************************************************************* */
-void transposeMultiply(const JacobianFactorGraph& fg, const VectorValues &r, VectorValues &x) {
- x.vector() = Vector::Zero(x.dim());
- Index i = 0;
- BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
- for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
- x[*j] += factor->getA(j).transpose() * r[i];
- }
- ++i;
- }
-}
-
-/* ************************************************************************* */
-JacobianFactorGraph::shared_ptr dynamicCastFactors(const GaussianFactorGraph &gfg) {
- JacobianFactorGraph::shared_ptr jfg(new JacobianFactorGraph());
- jfg->reserve(gfg.size());
- BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, gfg) {
-    JacobianFactor::shared_ptr castedFactor(boost::dynamic_pointer_cast<JacobianFactor>(factor));
- if(castedFactor) jfg->push_back(castedFactor);
- else throw std::invalid_argument("dynamicCastFactors(), dynamic_cast failed, meaning an invalid cast was requested.");
- }
- return jfg;
-}
-
-
-
-} // namespace
-
-
diff --git a/gtsam/linear/JacobianFactorGraph.h b/gtsam/linear/JacobianFactorGraph.h
deleted file mode 100644
index cb27b507a..000000000
--- a/gtsam/linear/JacobianFactorGraph.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-/**
- * @file JacobianFactorGraph.cpp
- * @date Jun 6, 2012
- * @brief Linear Algebra Operations for a JacobianFactorGraph
- * @author Yong Dian Jian
- */
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace gtsam {
-
-  typedef FactorGraph<JacobianFactor> JacobianFactorGraph;
-
- /** return A*x */
- Errors operator*(const JacobianFactorGraph& fg, const VectorValues& x);
-
- /** In-place version e <- A*x that overwrites e. */
- void multiplyInPlace(const JacobianFactorGraph& fg, const VectorValues& x, Errors& e);
-
- /** In-place version e <- A*x that takes an iterator. */
- void multiplyInPlace(const JacobianFactorGraph& fg, const VectorValues& x, const Errors::iterator& e);
-
- /** x += alpha*A'*e */
- void transposeMultiplyAdd(const JacobianFactorGraph& fg, double alpha, const Errors& e, VectorValues& x);
-
- /**
- * Compute the gradient of the energy function,
- * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
- * centered around \f$ x = x_0 \f$.
- * The gradient is \f$ A^T(Ax-b) \f$.
- * @param fg The Jacobian factor graph $(A,b)$
- * @param x0 The center about which to compute the gradient
- * @return The gradient as a VectorValues
- */
- VectorValues gradient(const JacobianFactorGraph& fg, const VectorValues& x0);
-
- /**
- * Compute the gradient of the energy function,
- * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
- * centered around zero.
- * The gradient is \f$ A^T(Ax-b) \f$.
- * @param fg The Jacobian factor graph $(A,b)$
- * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
- * @return The gradient as a VectorValues
- */
- void gradientAtZero(const JacobianFactorGraph& fg, VectorValues& g);
-
- /* matrix-vector operations */
- void residual(const JacobianFactorGraph& fg, const VectorValues &x, VectorValues &r);
- void multiply(const JacobianFactorGraph& fg, const VectorValues &x, VectorValues &r);
- void transposeMultiply(const JacobianFactorGraph& fg, const VectorValues &r, VectorValues &x);
-
- /** dynamic_cast the gaussian factors down to jacobian factors */
- JacobianFactorGraph::shared_ptr dynamicCastFactors(const GaussianFactorGraph &gfg);
-
-}
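A migration sketch for callers of the deleted header: the same operations now accept a GaussianFactorGraph directly, converting non-Jacobian factors internally, so the explicit dynamicCastFactors step disappears (the function name below is illustrative).

    #include <gtsam/linear/GaussianFactorGraph.h>
    using namespace gtsam;

    VectorValues gradientAfterMigration(const GaussianFactorGraph& gfg,
                                        const VectorValues& x0) {
      // Before: JacobianFactorGraph::shared_ptr jfg = dynamicCastFactors(gfg);
      //         VectorValues g = gradient(*jfg, x0);
      // After: no cast needed; Hessian factors are converted on the fly.
      return gradient(gfg, x0);
    }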
diff --git a/gtsam/linear/SimpleSPCGSolver.cpp b/gtsam/linear/SimpleSPCGSolver.cpp
deleted file mode 100644
index fa0fee82f..000000000
--- a/gtsam/linear/SimpleSPCGSolver.cpp
+++ /dev/null
@@ -1,226 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace gtsam {
-
-/* utility function */
-std::vector<size_t> extractRowSpec_(const FactorGraph<JacobianFactor>& jfg) {
-  std::vector<size_t> spec; spec.reserve(jfg.size());
- BOOST_FOREACH ( const JacobianFactor::shared_ptr &jf, jfg ) {
- spec.push_back(jf->rows());
- }
- return spec;
-}
-
-std::vector<size_t> extractColSpec_(const FactorGraph<GaussianFactor>& gfg, const VariableIndex &index) {
- const size_t n = index.size();
-  std::vector<size_t> spec(n, 0);
- for ( Index i = 0 ; i < n ; ++i ) {
- const GaussianFactor &gf = *(gfg[index[i].front()]);
- for ( GaussianFactor::const_iterator it = gf.begin() ; it != gf.end() ; ++it ) {
- spec[*it] = gf.getDim(it);
- }
- }
- return spec;
-}
-
-SimpleSPCGSolver::SimpleSPCGSolver(const GaussianFactorGraph &gfg, const Parameters ¶meters)
- : Base(), parameters_(parameters)
-{
-  std::vector<size_t> colSpec = extractColSpec_(gfg, VariableIndex(gfg));
-
- nVar_ = colSpec.size();
-
- /* Split the factor graph into At (tree) and Ac (constraints)
- * Note: This part has to be refined for your graph/application */
- GaussianFactorGraph::shared_ptr At;
- boost::tie(At, Ac_) = this->splitGraph(gfg);
-
- /* construct row vector spec of the new system */
- nAc_ = Ac_->size();
-  std::vector<size_t> rowSpec = extractRowSpec_(*Ac_);
- for ( Index i = 0 ; i < nVar_ ; ++i ) {
- rowSpec.push_back(colSpec[i]);
- }
-
- /* solve the tree with direct solver. get preconditioner */
-  Rt_ = EliminationTree<GaussianFactor>::Create(*At)->eliminate(EliminateQR);
-  xt_ = boost::make_shared<VectorValues>(gtsam::optimize(*Rt_));
-
- /* initial value for the lspcg method */
-  y0_ = boost::make_shared<VectorValues>(VectorValues::Zero(colSpec));
-
- /* the right hand side of the new system */
-  by_ = boost::make_shared<VectorValues>(VectorValues::Zero(rowSpec));
- for ( Index i = 0 ; i < nAc_ ; ++i ) {
- JacobianFactor::shared_ptr jf = (*Ac_)[i];
- Vector xi = internal::extractVectorValuesSlices(*xt_, jf->begin(), jf->end());
- (*by_)[i] = jf->getb() - jf->getA()*xi;
- }
-
- /* allocate buffer for row and column vectors */
-  tmpY_ = boost::make_shared<VectorValues>(VectorValues::Zero(colSpec));
-  tmpB_ = boost::make_shared<VectorValues>(VectorValues::Zero(rowSpec));
-}
-
-/* implements the CGLS method in Section 7.4 of Bjork's book */
-VectorValues SimpleSPCGSolver::optimize (const VectorValues &initial) {
-
- VectorValues::shared_ptr x(new VectorValues(initial));
- VectorValues r = VectorValues::Zero(*by_),
- q = VectorValues::Zero(*by_),
- p = VectorValues::Zero(*y0_),
- s = VectorValues::Zero(*y0_);
-
- residual(*x, r);
- transposeMultiply(r, s) ;
- p.vector() = s.vector() ;
-
- double gamma = s.vector().squaredNorm(), new_gamma = 0.0, alpha = 0.0, beta = 0.0 ;
-
- const double threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
- const size_t iMaxIterations = parameters_.maxIterations();
- const Parameters::Verbosity verbosity = parameters_.verbosity();
-
- if ( verbosity >= ConjugateGradientParameters::ERROR )
- std::cout << "[SimpleSPCGSolver] epsilon = " << parameters_.epsilon()
- << ", max = " << parameters_.maxIterations()
- << ", ||r0|| = " << std::sqrt(gamma)
- << ", threshold = " << threshold << std::endl;
-
- size_t k ;
- for ( k = 1 ; k < iMaxIterations ; ++k ) {
-
- multiply(p, q);
- alpha = gamma / q.vector().squaredNorm() ;
- x->vector() += (alpha * p.vector());
- r.vector() -= (alpha * q.vector());
- transposeMultiply(r, s);
- new_gamma = s.vector().squaredNorm();
- beta = new_gamma / gamma ;
- p.vector() = s.vector() + beta * p.vector();
- gamma = new_gamma ;
-
- if ( verbosity >= ConjugateGradientParameters::ERROR) {
- std::cout << "[SimpleSPCGSolver] iteration " << k << ": a = " << alpha << ": b = " << beta << ", ||r|| = " << std::sqrt(gamma) << std::endl;
- }
-
- if ( gamma < threshold ) break ;
- } // k
-
- if ( verbosity >= ConjugateGradientParameters::ERROR )
- std::cout << "[SimpleSPCGSolver] iteration " << k << ": a = " << alpha << ": b = " << beta << ", ||r|| = " << std::sqrt(gamma) << std::endl;
-
- /* transform y back to x */
- return this->transform(*x);
-}
-
-void SimpleSPCGSolver::residual(const VectorValues &input, VectorValues &output) {
- output.vector() = by_->vector();
- this->multiply(input, *tmpB_);
- axpy(-1.0, *tmpB_, output);
-}
-
-void SimpleSPCGSolver::multiply(const VectorValues &input, VectorValues &output) {
- this->backSubstitute(input, *tmpY_);
- gtsam::multiply(*Ac_, *tmpY_, output);
- for ( Index i = 0 ; i < nVar_ ; ++i ) {
- output[i + nAc_] = input[i];
- }
-}
-
-void SimpleSPCGSolver::transposeMultiply(const VectorValues &input, VectorValues &output) {
- gtsam::transposeMultiply(*Ac_, input, *tmpY_);
- this->transposeBackSubstitute(*tmpY_, output);
- for ( Index i = 0 ; i < nVar_ ; ++i ) {
- output[i] += input[nAc_+i];
- }
-}
-
-void SimpleSPCGSolver::backSubstitute(const VectorValues &input, VectorValues &output) {
- for ( Index i = 0 ; i < input.size() ; ++i ) {
- output[i] = input[i] ;
- }
-  BOOST_REVERSE_FOREACH(const boost::shared_ptr<GaussianConditional> cg, *Rt_) {
- const Index key = *(cg->beginFrontals());
- Vector xS = internal::extractVectorValuesSlices(output, cg->beginParents(), cg->endParents());
- xS = input[key] - cg->get_S() * xS;
-    output[key] = cg->get_R().triangularView<Eigen::Upper>().solve(xS);
- }
-}
-
-void SimpleSPCGSolver::transposeBackSubstitute(const VectorValues &input, VectorValues &output) {
- for ( Index i = 0 ; i < input.size() ; ++i ) {
- output[i] = input[i] ;
- }
-  BOOST_FOREACH(const boost::shared_ptr<GaussianConditional> cg, *Rt_) {
- const Index key = *(cg->beginFrontals());
- output[key] = gtsam::backSubstituteUpper(output[key], Matrix(cg->get_R()));
- const Vector d = internal::extractVectorValuesSlices(output, cg->beginParents(), cg->endParents())
- - cg->get_S().transpose() * output[key];
- internal::writeVectorValuesSlices(d, output, cg->beginParents(), cg->endParents());
- }
-}
-
-VectorValues SimpleSPCGSolver::transform(const VectorValues &y) {
- VectorValues x = VectorValues::Zero(y);
- this->backSubstitute(y, x);
- axpy(1.0, *xt_, x);
- return x;
-}
-
-boost::tuple<GaussianFactorGraph::shared_ptr, FactorGraph<JacobianFactor>::shared_ptr>
-SimpleSPCGSolver::splitGraph(const GaussianFactorGraph &gfg) {
-
- VariableIndex index(gfg);
- size_t n = index.size();
-  std::vector<bool> connected(n, false);
-
- GaussianFactorGraph::shared_ptr At(new GaussianFactorGraph());
-  FactorGraph<JacobianFactor>::shared_ptr Ac( new FactorGraph<JacobianFactor>());
-
- BOOST_FOREACH ( const GaussianFactor::shared_ptr &gf, gfg ) {
- bool augment = false ;
-
- /* check whether this factor should be augmented to the "tree" graph */
- if ( gf->keys().size() == 1 ) augment = true;
- else {
- BOOST_FOREACH ( const Index key, *gf ) {
- if ( connected[key] == false ) {
- augment = true ;
- }
- connected[key] = true;
- }
- }
-
- if ( augment ) At->push_back(gf);
-    else Ac->push_back(boost::dynamic_pointer_cast<JacobianFactor>(gf));
- }
-
- return boost::tie(At, Ac);
-}
-
-
-
-
-
-
-}
diff --git a/gtsam/linear/SimpleSPCGSolver.h b/gtsam/linear/SimpleSPCGSolver.h
deleted file mode 100644
index 994815fa4..000000000
--- a/gtsam/linear/SimpleSPCGSolver.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-#pragma once
-
-#include
-#include
-#include
-#include
-#include
-
-namespace gtsam {
-
-struct SimpleSPCGSolverParameters : public ConjugateGradientParameters {
- typedef ConjugateGradientParameters Base;
- SimpleSPCGSolverParameters() : Base() {}
-};
-
-/**
- * This class gives a simple implementation of the SPCG solver presented in Dellaert et al., IROS 2010.
- *
- * Given a linear least-squares problem \f$ f(x) = |A x - b|^2 \f$, we split it into
- * \f$ f(x) = |A_t x - b_t|^2 + |A_c x - b_c|^2 \f$, where \f$ A_t \f$ denotes the "tree" part and \f$ A_c \f$ denotes the "constraint" part.
- * \f$ A_t \f$ is factorized into \f$ Q_t R_t \f$, and we compute \f$ c_t = Q_t^{-1} b_t \f$ and \f$ x_t = R_t^{-1} c_t \f$ accordingly.
- * Then we solve the reparametrized problem \f$ f(y) = |y|^2 + |A_c R_t^{-1} y - \bar{b_y}|^2 \f$, where \f$ y = R_t(x - x_t) \f$ and \f$ \bar{b_y} = b_c - A_c x_t \f$.
- *
- * In matrix form, this is equivalent to solving \f$ [A_c R_t^{-1} ; I ] y = [\bar{b_y} ; 0] \f$, which we can solve
- * with the least-squares variation of the conjugate gradient method.
- *
- * Note: a full SPCG implementation will appear in the next release.
- *
- * \nosubgrouping
- */
-
-class SimpleSPCGSolver : public IterativeSolver {
-
-public:
-
- typedef IterativeSolver Base;
- typedef SimpleSPCGSolverParameters Parameters;
- typedef boost::shared_ptr<SimpleSPCGSolver> shared_ptr;
-
-protected:
-
- size_t nVar_ ; ///< number of variables \f$ x \f$
- size_t nAc_ ; ///< number of factors in \f$ A_c \f$
- FactorGraph<JacobianFactor>::shared_ptr Ac_; ///< the constrained part of the graph
- GaussianBayesNet::shared_ptr Rt_; ///< the Gaussian Bayes net of the tree part of the graph
- VectorValues::shared_ptr xt_; ///< the solution \f$ x_t = A_t^{-1} b_t \f$
- VectorValues::shared_ptr y0_; ///< a column zero vector
- VectorValues::shared_ptr by_; ///< \f$ [\bar{b_y} ; 0 ] \f$
- VectorValues::shared_ptr tmpY_; ///< buffer for the column vectors
- VectorValues::shared_ptr tmpB_; ///< buffer for the row vectors
- Parameters parameters_; ///< Parameters for iterative method
-
-public:
-
- SimpleSPCGSolver(const GaussianFactorGraph &gfg, const Parameters &parameters);
- virtual ~SimpleSPCGSolver() {}
- virtual VectorValues optimize () {return optimize(*y0_);}
-
-protected:
-
- VectorValues optimize (const VectorValues &initial);
-
- /** output = \f$ [\bar{b_y} ; 0 ] - [A_c R_t^{-1} ; I] \f$ input */
- void residual(const VectorValues &input, VectorValues &output);
-
- /** output = \f$ [A_c R_t^{-1} ; I] \f$ input */
- void multiply(const VectorValues &input, VectorValues &output);
-
- /** output = \f$ [R_t^{-T} A_c^T, I] \f$ input */
- void transposeMultiply(const VectorValues &input, VectorValues &output);
-
- /** output = \f$ R_t^{-1} \f$ input */
- void backSubstitute(const VectorValues &rhs, VectorValues &sol) ;
-
- /** output = \f$ R_t^{-T} \f$ input */
- void transposeBackSubstitute(const VectorValues &rhs, VectorValues &sol) ;
-
- /** return \f$ R_t^{-1} y + x_t \f$ */
- VectorValues transform(const VectorValues &y);
-
- /** naively split a gaussian factor graph into tree and constraint parts
- * Note: This function has to be refined for your graph/application */
- boost::tuple<GaussianFactorGraph::shared_ptr, FactorGraph<JacobianFactor>::shared_ptr>
- splitGraph(const GaussianFactorGraph &gfg);
-};
-
-}
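The reparametrization documented in the deleted header above is easy to sanity-check on toy data. The following is a minimal standalone sketch using plain Eigen only; the matrices are made up, and a normal-equations solve stands in for the CGLS iteration the real solver runs (nothing here is GTSAM API):

  #include <Eigen/Dense>
  #include <iostream>

  int main() {
    using namespace Eigen;
    // Toy split: At (3x2, full column rank) is the "tree" part, Ac the "constraint" part.
    MatrixXd At(3, 2), Ac(2, 2);
    VectorXd bt(3), bc(2);
    At << 1, 0,  0, 1,  1, 1;   bt << 1, 2, 3;
    Ac << 1, -1, 2, 1;          bc << 0, 1;

    // Factorize At = Qt*Rt and solve the tree part: xt = Rt^{-1} Qt^T bt.
    HouseholderQR<MatrixXd> qr(At);
    MatrixXd Rt = qr.matrixQR().topRows(2).triangularView<Upper>();
    VectorXd xt = qr.solve(bt);

    // Reparametrized problem: min_y |y|^2 + |Ac*Rt^{-1}*y - by|^2 with by = bc - Ac*xt,
    // i.e. the stacked system [Ac*Rt^{-1}; I] y = [by; 0].
    MatrixXd AcRtInv = Ac * Rt.inverse();
    VectorXd by = bc - Ac * xt;
    MatrixXd M(4, 2);
    M << AcRtInv, MatrixXd::Identity(2, 2);
    VectorXd rhs(4);
    rhs << by, VectorXd::Zero(2);
    VectorXd y = (M.transpose() * M).ldlt().solve(M.transpose() * rhs);

    // Transform back, exactly as SimpleSPCGSolver::transform did: x = Rt^{-1} y + xt.
    VectorXd x = Rt.triangularView<Upper>().solve(y) + xt;
    std::cout << "x = " << x.transpose() << std::endl;
    return 0;
  }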
diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp
index 91a98a263..8b5a3708f 100644
--- a/gtsam/linear/SubgraphPreconditioner.cpp
+++ b/gtsam/linear/SubgraphPreconditioner.cpp
@@ -17,18 +17,29 @@
#include
#include
-#include
-
#include
using namespace std;
namespace gtsam {
+
+ /* ************************************************************************* */
+ static GaussianFactorGraph::shared_ptr convertToJacobianFactors(const GaussianFactorGraph &gfg) {
+ GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph());
+ BOOST_FOREACH(const GaussianFactor::shared_ptr &gf, gfg) {
+ JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf);
+ if( !jf ) {
+ jf = boost::make_shared<JacobianFactor>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
+ }
+ result->push_back(jf);
+ }
+ return result;
+ }
/* ************************************************************************* */
- SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2,
+ SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab2,
const sharedBayesNet& Rc1, const sharedValues& xbar) :
- Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(new Errors(-gaussianErrors(*Ab2_,*xbar))) {
+ Ab2_(convertToJacobianFactors(*Ab2)), Rc1_(Rc1), xbar_(xbar), b2bar_(new Errors(-gaussianErrors(*Ab2_,*xbar))) {
}
/* ************************************************************************* */
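The inline comment on the conversion above leans on a standard identity: a quadratic (Hessian) factor with information matrix \f$ L = A^T A \f$ can be turned back into Jacobian form by taking an upper-triangular Cholesky factor \f$ R \f$ with \f$ R^T R = L \f$, since \f$ |R x|^2 = x^T L x = |A x|^2 \f$. A minimal sketch of just that identity in plain Eigen (toy data, not the HessianFactor machinery itself):

  #include <Eigen/Dense>
  #include <cassert>

  int main() {
    using namespace Eigen;
    MatrixXd A(3, 2);
    A << 1, 2,  0, 1,  1, 0;
    MatrixXd L = A.transpose() * A;  // information (Hessian) matrix L = A^T A
    MatrixXd R = L.llt().matrixU();  // upper-triangular Cholesky factor, L = R^T R
    assert((R.transpose() * R - L).norm() < 1e-9);  // R is a valid Jacobian for L
    return 0;
  }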
diff --git a/gtsam/linear/SubgraphPreconditioner.h b/gtsam/linear/SubgraphPreconditioner.h
index 7b59735d9..f266928d7 100644
--- a/gtsam/linear/SubgraphPreconditioner.h
+++ b/gtsam/linear/SubgraphPreconditioner.h
@@ -18,6 +18,7 @@
#pragma once
#include
+#include
#include
namespace gtsam {
@@ -32,15 +33,14 @@ namespace gtsam {
class SubgraphPreconditioner {
public:
-
typedef boost::shared_ptr<SubgraphPreconditioner> shared_ptr;
typedef boost::shared_ptr<GaussianBayesNet> sharedBayesNet;
- typedef boost::shared_ptr<FactorGraph<JacobianFactor> > sharedFG;
+ typedef boost::shared_ptr<GaussianFactorGraph> sharedFG;
typedef boost::shared_ptr<VectorValues> sharedValues;
typedef boost::shared_ptr<Errors> sharedErrors;
private:
- sharedFG Ab1_, Ab2_;
+ sharedFG Ab2_;
sharedBayesNet Rc1_;
sharedValues xbar_; ///< A1 \ b1
sharedErrors b2bar_; ///< A2*xbar - b2
@@ -48,17 +48,14 @@ namespace gtsam {
public:
SubgraphPreconditioner();
+
/**
* Constructor
- * @param Ab1: the Graph A1*x=b1
* @param Ab2: the Graph A2*x=b2
* @param Rc1: the Bayes Net R1*x=c1
* @param xbar: the solution to R1*x=c1
*/
- SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar);
-
- /** Access Ab1 */
- const sharedFG& Ab1() const { return Ab1_; }
+ SubgraphPreconditioner(const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar);
/** Access Ab2 */
const sharedFG& Ab2() const { return Ab2_; }
@@ -69,23 +66,23 @@ namespace gtsam {
/** Access b2bar */
const sharedErrors b2bar() const { return b2bar_; }
- /**
- * Add zero-mean i.i.d. Gaussian prior terms to each variable
- * @param sigma Standard deviation of Gaussian
- */
-// SubgraphPreconditioner add_priors(double sigma) const;
+ /**
+ * Add zero-mean i.i.d. Gaussian prior terms to each variable
+ * @param sigma Standard deviation of Gaussian
+ */
+// SubgraphPreconditioner add_priors(double sigma) const;
/* x = xbar + inv(R1)*y */
VectorValues x(const VectorValues& y) const;
/* A zero VectorValues with the structure of xbar */
VectorValues zero() const {
- VectorValues V(VectorValues::Zero(*xbar_)) ;
+ VectorValues V(VectorValues::Zero(*xbar_));
return V ;
}
/**
- * Add constraint part of the error only, used in both calls above
+ * Add constraint part of the error only
* y += alpha*inv(R1')*A2'*e2
* Takes a range indicating e2 !!!!
*/
diff --git a/gtsam/linear/SubgraphSolver-inl.h b/gtsam/linear/SubgraphSolver-inl.h
deleted file mode 100644
index 28b2caaac..000000000
--- a/gtsam/linear/SubgraphSolver-inl.h
+++ /dev/null
@@ -1,80 +0,0 @@
-///* ----------------------------------------------------------------------------
-//
-// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
-// * Atlanta, Georgia 30332-0415
-// * All Rights Reserved
-// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-//
-// * See LICENSE for the license information
-//
-// * -------------------------------------------------------------------------- */
-//
-//#pragma once
-//
-//#include
-//
-//#include
-//#include
-//#include
-//
-//namespace gtsam {
-//
-//template<class GRAPH, class LINEAR, class VALUES>
-//void SubgraphSolver<GRAPH, LINEAR, VALUES>::replaceFactors(const typename LINEAR::shared_ptr &graph) {
-//
-// GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>();
-// GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared<GaussianFactorGraph>();
-//
-// if (parameters_->verbosity()) cout << "split the graph ...";
-// split(pairs_, *graph, *Ab1, *Ab2) ;
-// if (parameters_->verbosity()) cout << ",with " << Ab1->size() << " and " << Ab2->size() << " factors" << endl;
-//
-// // // Add a HardConstraint to the root, otherwise the root will be singular
-// // Key root = keys.back();
-// // T_.addHardConstraint(root, theta0[root]);
-// //
-// // // compose the approximate solution
-// // theta_bar_ = composePoses (T_, tree, theta0[root]);
-//
-// LINEAR sacrificialAb1 = *Ab1; // duplicate !!!!!
-// SubgraphPreconditioner::sharedBayesNet Rc1 =
-// EliminationTree<GaussianFactor>::Create(sacrificialAb1)->eliminate(&EliminateQR);
-// SubgraphPreconditioner::sharedValues xbar = gtsam::optimize_(*Rc1);
-//
-// pc_ = boost::make_shared<SubgraphPreconditioner>(
-// Ab1->dynamicCastFactors<FactorGraph<JacobianFactor> >(), Ab2->dynamicCastFactors<FactorGraph<JacobianFactor> >(),Rc1,xbar);
-//}
-//
-//template<class GRAPH, class LINEAR, class VALUES>
-//VectorValues::shared_ptr SubgraphSolver<GRAPH, LINEAR, VALUES>::optimize() const {
-//
-// // preconditioned conjugate gradient
-// VectorValues zeros = pc_->zero();
-// VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors>
-// (*pc_, zeros, *parameters_);
-//
-// boost::shared_ptr<VectorValues> xbar = boost::make_shared<VectorValues>() ;
-// *xbar = pc_->x(ybar);
-// return xbar;
-//}
-//
-//template<class GRAPH, class LINEAR, class VALUES>
-//void SubgraphSolver<GRAPH, LINEAR, VALUES>::initialize(const GRAPH& G, const Values& theta0) {
-// // generate spanning tree
-// PredecessorMap tree_ = gtsam::findMinimumSpanningTree(G);
-//
-// // make the ordering
-// list keys = predecessorMap2Keys(tree_);
-// ordering_ = boost::make_shared(list(keys.begin(), keys.end()));
-//
-// // build factor pairs
-// pairs_.clear();
-// typedef pair<Key, Key> EG ;
-// BOOST_FOREACH( const EG &eg, tree_ ) {
-// Key key1 = Key(eg.first),
-// key2 = Key(eg.second) ;
-// pairs_.insert(pair((*ordering_)[key1], (*ordering_)[key2])) ;
-// }
-//}
-//
-//} // \namespace gtsam
diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp
index 9b37d9a4b..9986fb76c 100644
--- a/gtsam/linear/SubgraphSolver.cpp
+++ b/gtsam/linear/SubgraphSolver.cpp
@@ -14,100 +14,119 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
#include
-
+#include
#include
#include
-
#include
using namespace std;
namespace gtsam {
+/**************************************************************************************************/
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters)
: parameters_(parameters)
{
+ initialize(gfg);
+}
+/**************************************************************************************************/
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &jfg, const Parameters &parameters)
+ : parameters_(parameters)
+{
+ initialize(*jfg);
+}
- GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared();
- GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared();
+/**************************************************************************************************/
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, const Parameters &parameters)
+ : parameters_(parameters) {
- boost::tie(Ab1, Ab2) = splitGraph(gfg) ;
+ GaussianBayesNet::shared_ptr Rc1 = EliminationTree<GaussianFactor>::Create(Ab1)->eliminate(&EliminateQR);
+ initialize(Rc1, boost::make_shared<GaussianFactorGraph>(Ab2));
+}
- if (parameters_.verbosity())
- cout << ",with " << Ab1->size() << " and " << Ab2->size() << " factors" << endl;
-
- // // Add a HardConstraint to the root, otherwise the root will be singular
- // Key root = keys.back();
- // T_.addHardConstraint(root, theta0[root]);
- //
- // // compose the approximate solution
- // theta_bar_ = composePoses (T_, tree, theta0[root]);
+/**************************************************************************************************/
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &Ab1,
+ const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters)
+ : parameters_(parameters) {
GaussianBayesNet::shared_ptr Rc1 = EliminationTree<GaussianFactor>::Create(*Ab1)->eliminate(&EliminateQR);
- VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
+ initialize(Rc1, Ab2);
+}
- // Convert or cast Ab1 to JacobianFactors
- boost::shared_ptr<FactorGraph<JacobianFactor> > Ab1Jacobians = boost::make_shared<FactorGraph<JacobianFactor> >();
- Ab1Jacobians->reserve(Ab1->size());
- BOOST_FOREACH(const boost::shared_ptr<GaussianFactor>& factor, *Ab1) {
- if(boost::shared_ptr<JacobianFactor> jf =
- boost::dynamic_pointer_cast<JacobianFactor>(factor))
- Ab1Jacobians->push_back(jf);
- else
- Ab1Jacobians->push_back(boost::make_shared<JacobianFactor>(*factor));
- }
-
- // Convert or cast Ab2 to JacobianFactors
- boost::shared_ptr<FactorGraph<JacobianFactor> > Ab2Jacobians = boost::make_shared<FactorGraph<JacobianFactor> >();
- Ab1Jacobians->reserve(Ab2->size());
- BOOST_FOREACH(const boost::shared_ptr<GaussianFactor>& factor, *Ab2) {
- if(boost::shared_ptr<JacobianFactor> jf =
- boost::dynamic_pointer_cast<JacobianFactor>(factor))
- Ab2Jacobians->push_back(jf);
- else
- Ab2Jacobians->push_back(boost::make_shared<JacobianFactor>(*factor));
- }
+/**************************************************************************************************/
+SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph &Ab2,
+ const Parameters &parameters) : parameters_(parameters)
+{
+ initialize(Rc1, boost::make_shared<GaussianFactorGraph>(Ab2));
+}
- pc_ = boost::make_shared<SubgraphPreconditioner>(Ab1Jacobians, Ab2Jacobians, Rc1, xbar);
+/**************************************************************************************************/
+SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1,
+ const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters) : parameters_(parameters)
+{
+ initialize(Rc1, Ab2);
}
VectorValues SubgraphSolver::optimize() {
-
VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors>(*pc_, pc_->zero(), parameters_);
return pc_->x(ybar);
}
-boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>
-SubgraphSolver::splitGraph(const GaussianFactorGraph &gfg) {
+void SubgraphSolver::initialize(const GaussianFactorGraph &jfg)
+{
+ GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>(),
+ Ab2 = boost::make_shared<GaussianFactorGraph>();
- VariableIndex index(gfg);
- size_t n = index.size();
- std::vector<bool> connected(n, false);
+ boost::tie(Ab1, Ab2) = splitGraph(jfg) ;
+ if (parameters_.verbosity())
+ cout << "Split A into (A1) " << Ab1->size() << " and (A2) " << Ab2->size() << " factors" << endl;
+
+ GaussianBayesNet::shared_ptr Rc1 = EliminationTree<GaussianFactor>::Create(*Ab1)->eliminate(&EliminateQR);
+ VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
+ pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
+}
+
+void SubgraphSolver::initialize(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph::shared_ptr &Ab2)
+{
+ VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
+ pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
+}
+
+boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>
+SubgraphSolver::splitGraph(const GaussianFactorGraph &jfg) {
+
+ const VariableIndex index(jfg);
+ const size_t n = index.size();
+ DSFVector D(n);
GaussianFactorGraph::shared_ptr At(new GaussianFactorGraph());
GaussianFactorGraph::shared_ptr Ac( new GaussianFactorGraph());
- BOOST_FOREACH ( const GaussianFactor::shared_ptr &gf, gfg ) {
+ size_t t = 0;
+ BOOST_FOREACH ( const GaussianFactor::shared_ptr &gf, jfg ) {
+
+ if ( gf->keys().size() > 2 ) {
+ throw runtime_error("SubgraphSolver::splitGraph the graph is not simple, sanity check failed ");
+ }
bool augment = false ;
/* check whether this factor should be augmented to the "tree" graph */
if ( gf->keys().size() == 1 ) augment = true;
else {
- BOOST_FOREACH ( const Index key, *gf ) {
- if ( connected[key] == false ) {
- augment = true ;
- connected[key] = true;
- }
+ const Index u = gf->keys()[0], v = gf->keys()[1],
+ u_root = D.findSet(u), v_root = D.findSet(v);
+ if ( u_root != v_root ) {
+ t++; augment = true ;
+ D.makeUnionInPlace(u_root, v_root);
}
}
-
if ( augment ) At->push_back(gf);
else Ac->push_back(gf);
}
@@ -115,7 +134,4 @@ SubgraphSolver::splitGraph(const GaussianFactorGraph &gfg) {
return boost::tie(At, Ac);
}
-
-
-
} // \namespace gtsam
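The DSFVector logic above replaces the old connected-flags test with a proper union-find, so a binary factor is sent to the tree part exactly when its two variables are not yet connected. A standalone sketch of that selection rule (hypothetical DSF type with path compression, not GTSAM's DSFVector):

  #include <cstddef>
  #include <numeric>
  #include <utility>
  #include <vector>
  #include <iostream>

  struct DSF {
    std::vector<std::size_t> parent;
    explicit DSF(std::size_t n) : parent(n) { std::iota(parent.begin(), parent.end(), 0); }
    std::size_t find(std::size_t i) { return parent[i] == i ? i : parent[i] = find(parent[i]); }
    void merge(std::size_t a, std::size_t b) { parent[find(a)] = find(b); }
  };

  int main() {
    // Binary factors (edges) of a toy graph over 4 variables.
    const std::pair<std::size_t, std::size_t> edges[] = {{0,1},{1,2},{0,2},{2,3},{1,3}};
    DSF dsf(4);
    for (auto e : edges) {
      const bool tree = dsf.find(e.first) != dsf.find(e.second);
      if (tree) dsf.merge(e.first, e.second);
      std::cout << e.first << "-" << e.second
                << (tree ? " -> tree (A1)" : " -> constraint (A2)") << std::endl;
    }
    return 0;
  }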
diff --git a/gtsam/linear/SubgraphSolver.h b/gtsam/linear/SubgraphSolver.h
index 60104bbe0..7c60c0588 100644
--- a/gtsam/linear/SubgraphSolver.h
+++ b/gtsam/linear/SubgraphSolver.h
@@ -13,7 +13,9 @@
#include
#include
+#include
#include
+#include
#include
@@ -23,31 +25,61 @@ class SubgraphSolverParameters : public ConjugateGradientParameters {
public:
typedef ConjugateGradientParameters Base;
SubgraphSolverParameters() : Base() {}
+ virtual void print(const std::string &s="") const { Base::print(s); }
};
/**
- * A linear system solver using subgraph preconditioning conjugate gradient
+ * This class implements the SPCG solver presented in Dellaert et al., IROS 2010.
+ *
+ * Given a linear least-squares problem \f$ f(x) = |A x - b|^2 \f$, we split it into
+ * \f$ f(x) = |A_t x - b_t|^2 + |A_c x - b_c|^2 \f$, where \f$ A_t \f$ denotes the "tree" part and \f$ A_c \f$ denotes the "constraint" part.
+ * \f$ A_t \f$ is factorized into \f$ Q_t R_t \f$, and we compute \f$ c_t = Q_t^{-1} b_t \f$ and \f$ x_t = R_t^{-1} c_t \f$ accordingly.
+ * Then we solve the reparametrized problem \f$ f(y) = |y|^2 + |A_c R_t^{-1} y - \bar{b_y}|^2 \f$, where \f$ y = R_t(x - x_t) \f$ and \f$ \bar{b_y} = b_c - A_c x_t \f$.
+ *
+ * In matrix form, this is equivalent to solving \f$ [A_c R_t^{-1} ; I ] y = [\bar{b_y} ; 0] \f$, which we can solve
+ * with the least-squares variation of the conjugate gradient method.
+ *
+ * To use it in nonlinear optimization, see the following example:
+ *
+ * LevenbergMarquardtParams parameters;
+ * parameters.linearSolverType = SuccessiveLinearizationParams::CONJUGATE_GRADIENT;
+ * parameters.iterativeParams = boost::make_shared<SubgraphSolverParameters>();
+ * LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
+ * Values result = optimizer.optimize();
+ *
+ * \nosubgrouping
*/
class SubgraphSolver : public IterativeSolver {
public:
-
typedef SubgraphSolverParameters Parameters;
protected:
-
Parameters parameters_;
SubgraphPreconditioner::shared_ptr pc_; ///< preconditioner object
public:
+ /* Given a Gaussian factor graph, split it into a spanning tree (A1) and the remaining factors (A2) for SPCG */
+ SubgraphSolver(const GaussianFactorGraph &A, const Parameters &parameters);
+ SubgraphSolver(const GaussianFactorGraph::shared_ptr &A, const Parameters &parameters);
+
+ /* The user specifies the subgraph part and the constraint part; may throw an exception if A1 is underdetermined */
+ SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, const Parameters &parameters);
+ SubgraphSolver(const GaussianFactorGraph::shared_ptr &Ab1, const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters);
+
+ /* Same as above, but A1 has already been eliminated into the Bayes net Rc1 */
+ SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph &Ab2, const Parameters &parameters);
+ SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters);
- SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters);
virtual ~SubgraphSolver() {}
virtual VectorValues optimize () ;
protected:
+ void initialize(const GaussianFactorGraph &jfg);
+ void initialize(const GaussianBayesNet::shared_ptr &Rc1, const GaussianFactorGraph::shared_ptr &Ab2);
+
boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>
splitGraph(const GaussianFactorGraph &gfg) ;
};
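A minimal end-to-end sketch of the simplest entry point, assuming an existing GaussianFactorGraph Ab and using only the constructors and optimize() declared above:

  SubgraphSolverParameters parameters;
  SubgraphSolver solver(Ab, parameters);  // splits Ab into tree (A1) + constraints (A2)
  VectorValues x = solver.optimize();     // PCG on the subgraph-preconditioned system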
diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
index 8a052a66f..5cc0841b0 100644
--- a/gtsam/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -122,7 +122,7 @@ namespace gtsam {
// conjugate gradient method.
// S: linear system, V: step vector, E: errors
template<class S, class V, class E>
- V conjugateGradients(const S& Ab, V x, const ConjugateGradientParameters &parameters, bool steepest = false) {
+ V conjugateGradients(const S& Ab, V x, const ConjugateGradientParameters &parameters, bool steepest) {
CGState<S, V, E> state(Ab, x, parameters, steepest);
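One likely reason the "= false" moved out of this definition: C++ allows a default argument to be specified only once, and the declaration in iterative.h (updated below) now carries it. In isolation:

  void f(int n, bool flag = false);        // declaration: the default lives here
  void f(int n, bool flag) { /* ... */ }   // definition: must not repeat "= false"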
diff --git a/gtsam/linear/iterative.cpp b/gtsam/linear/iterative.cpp
index 6dadb2d72..07d4de865 100644
--- a/gtsam/linear/iterative.cpp
+++ b/gtsam/linear/iterative.cpp
@@ -19,53 +19,60 @@
#include
#include
#include
-#include
+#include
#include
-
#include
using namespace std;
namespace gtsam {
- /* ************************************************************************* */
- void System::print (const string& s) const {
- cout << s << endl;
- gtsam::print(A_, "A");
- gtsam::print(b_, "b");
- }
+ /* ************************************************************************* */
+ void System::print(const string& s) const {
+ cout << s << endl;
+ gtsam::print(A_, "A");
+ gtsam::print(b_, "b");
+ }
- /* ************************************************************************* */
+ /* ************************************************************************* */
- Vector steepestDescent(const System& Ab, const Vector& x, const ConjugateGradientParameters & parameters) {
- return conjugateGradients<System, Vector, Vector> (Ab, x, parameters, true);
- }
+ Vector steepestDescent(const System& Ab, const Vector& x,
+ const ConjugateGradientParameters & parameters) {
+ return conjugateGradients<System, Vector, Vector>(Ab, x, parameters, true);
+ }
- Vector conjugateGradientDescent(const System& Ab, const Vector& x, const ConjugateGradientParameters & parameters) {
- return conjugateGradients<System, Vector, Vector> (Ab, x, parameters);
- }
+ Vector conjugateGradientDescent(const System& Ab, const Vector& x,
+ const ConjugateGradientParameters & parameters) {
+ return conjugateGradients<System, Vector, Vector>(Ab, x, parameters);
+ }
- /* ************************************************************************* */
- Vector steepestDescent(const Matrix& A, const Vector& b, const Vector& x, const ConjugateGradientParameters & parameters) {
- System Ab(A, b);
- return conjugateGradients<System, Vector, Vector> (Ab, x, parameters, true);
- }
+ /* ************************************************************************* */
+ Vector steepestDescent(const Matrix& A, const Vector& b, const Vector& x,
+ const ConjugateGradientParameters & parameters) {
+ System Ab(A, b);
+ return conjugateGradients<System, Vector, Vector>(Ab, x, parameters, true);
+ }
- Vector conjugateGradientDescent(const Matrix& A, const Vector& b, const Vector& x, const ConjugateGradientParameters & parameters) {
- System Ab(A, b);
- return conjugateGradients<System, Vector, Vector> (Ab, x, parameters);
- }
+ Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
+ const Vector& x, const ConjugateGradientParameters & parameters) {
+ System Ab(A, b);
+ return conjugateGradients<System, Vector, Vector>(Ab, x, parameters);
+ }
- /* ************************************************************************* */
- VectorValues steepestDescent(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const ConjugateGradientParameters & parameters) {
- return conjugateGradients<FactorGraph<JacobianFactor>, VectorValues, Errors> (fg, x, parameters, true);
- }
+ /* ************************************************************************* */
+ VectorValues steepestDescent(const GaussianFactorGraph& fg,
+ const VectorValues& x, const ConjugateGradientParameters & parameters) {
+ return conjugateGradients<GaussianFactorGraph, VectorValues, Errors>(
+ fg, x, parameters, true);
+ }
- VectorValues conjugateGradientDescent(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const ConjugateGradientParameters & parameters) {
- return conjugateGradients<FactorGraph<JacobianFactor>, VectorValues, Errors> (fg, x, parameters);
- }
+ VectorValues conjugateGradientDescent(const GaussianFactorGraph& fg,
+ const VectorValues& x, const ConjugateGradientParameters & parameters) {
+ return conjugateGradients<GaussianFactorGraph, VectorValues, Errors>(
+ fg, x, parameters);
+ }
- /* ************************************************************************* */
+/* ************************************************************************* */
} // namespace gtsam
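A minimal usage sketch of the dense overloads above (toy data; Vector_ is the GTSAM helper already used in the tests in this diff, while Matrix_ and zero are assumed to be the matching matrix/vector helpers):

  ConjugateGradientParameters parameters;
  Matrix A = Matrix_(3, 2,
      1.0, 0.0,
      0.0, 1.0,
      1.0, 1.0);
  Vector b = Vector_(3, 1.0, 2.0, 3.0);
  Vector x0 = zero(2);                                         // initial estimate
  Vector x = conjugateGradientDescent(A, b, x0, parameters);   // CG on |Ax - b|^2
  Vector xs = steepestDescent(A, b, x0, parameters);           // steepest-descent variant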
diff --git a/gtsam/linear/iterative.h b/gtsam/linear/iterative.h
index 5ffb366c2..0eb04bf49 100644
--- a/gtsam/linear/iterative.h
+++ b/gtsam/linear/iterative.h
@@ -31,12 +31,11 @@ namespace gtsam {
* "Vector" class E needs dot(v,v)
* @param Ab, the "system" that needs to be solved, examples below
* @param x is the initial estimate
- * @param epsilon determines the convergence criterion: norm(g) < epsilon * norm(g0)
- template<class S, class V, class E>
- V conjugateGradients(const S& Ab, V x, bool verbose, double epsilon, size_t maxIterations, bool steepest = false);
+ template<class S, class V, class E>
+ V conjugateGradients(const S& Ab, V x,
+ const ConjugateGradientParameters &parameters, bool steepest = false);
/**
* Helper class encapsulating the combined system |Ax-b_|^2
@@ -127,11 +126,9 @@ namespace gtsam {
const Vector& x,
const ConjugateGradientParameters & parameters);
- class GaussianFactorGraph;
-
/**
* Method of steepest gradients, Gaussian Factor Graph version
- * */
+ */
VectorValues steepestDescent(
const GaussianFactorGraph& fg,
const VectorValues& x,
@@ -139,7 +136,7 @@ namespace gtsam {
/**
* Method of conjugate gradients (CG), Gaussian Factor Graph version
- * */
+ */
VectorValues conjugateGradientDescent(
const GaussianFactorGraph& fg,
const VectorValues& x,
diff --git a/gtsam/linear/tests/testGaussianFactorGraph.cpp b/gtsam/linear/tests/testGaussianFactorGraph.cpp
index 4c0aa3871..cb835a76a 100644
--- a/gtsam/linear/tests/testGaussianFactorGraph.cpp
+++ b/gtsam/linear/tests/testGaussianFactorGraph.cpp
@@ -410,7 +410,7 @@ TEST(GaussianFactor, eliminateFrontals)
factors.push_back(factor4);
// extract the dense matrix for the graph
- Matrix actualDense = factors.denseJacobian();
+ Matrix actualDense = factors.augmentedJacobian();
EXPECT(assert_equal(2.0 * Ab, actualDense));
// Convert to Jacobians, inefficient copy of all factors instead of selectively converting only Hessians
@@ -619,7 +619,7 @@ TEST(GaussianFactorGraph, sparseJacobian) {
}
/* ************************************************************************* */
-TEST(GaussianFactorGraph, denseHessian) {
+TEST(GaussianFactorGraph, matrices) {
// Create factor graph:
// x1 x2 x3 x4 x5 b
// 1 2 3 0 0 4
@@ -639,9 +639,24 @@ TEST(GaussianFactorGraph, denseHessian) {
9,10, 0,11,12,13,
0, 0, 0,14,15,16;
+ Matrix expectedJacobian = jacobian;
Matrix expectedHessian = jacobian.transpose() * jacobian;
- Matrix actualHessian = gfg.denseHessian();
+ Matrix expectedA = jacobian.leftCols(jacobian.cols()-1);
+ Vector expectedb = jacobian.col(jacobian.cols()-1);
+ Matrix expectedL = expectedA.transpose() * expectedA;
+ Vector expectedeta = expectedA.transpose() * expectedb;
+
+ Matrix actualJacobian = gfg.augmentedJacobian();
+ Matrix actualHessian = gfg.augmentedHessian();
+ Matrix actualA; Vector actualb; boost::tie(actualA,actualb) = gfg.jacobian();
+ Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian();
+
+ EXPECT(assert_equal(expectedJacobian, actualJacobian));
EXPECT(assert_equal(expectedHessian, actualHessian));
+ EXPECT(assert_equal(expectedA, actualA));
+ EXPECT(assert_equal(expectedb, actualb));
+ EXPECT(assert_equal(expectedL, actualL));
+ EXPECT(assert_equal(expectedeta, actualeta));
}
/* ************************************************************************* */
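The expanded test checks the four matrix accessors against each other. With the augmented Jacobian \f$ [A \; b] \f$ built above, the quantities are related by

\f[ \mathcal{L} = A^T A, \qquad \eta = A^T b, \qquad [A \; b]^T [A \; b] = \begin{pmatrix} A^T A & A^T b \\ b^T A & b^T b \end{pmatrix} \f]

so augmentedHessian() is the Gram matrix of augmentedJacobian(), and hessian() returns exactly the (expectedL, expectedeta) pair of the normal equations \f$ A^T A x = A^T b \f$.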
diff --git a/gtsam/linear/tests/testJacobianFactorGraph.cpp b/gtsam/linear/tests/testJacobianFactorGraph.cpp
new file mode 100644
index 000000000..07dda4652
--- /dev/null
+++ b/gtsam/linear/tests/testJacobianFactorGraph.cpp
@@ -0,0 +1,92 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file testJacobianFactorGraph.cpp
+ * @brief Unit tests for GaussianFactorGraph
+ * @author Yong Dian Jian
+ **/
+
+#include
+#include
+
+
+///* ************************************************************************* */
+//TEST( GaussianFactorGraph, gradient )
+//{
+// GaussianFactorGraph fg = createGaussianFactorGraph();
+//
+// // Construct expected gradient
+// VectorValues expected;
+//
+// // 2*f(x) = 100*(x1+c[X(1)])^2 + 100*(x2-x1-[0.2;-0.1])^2 + 25*(l1-x1-[0.0;0.2])^2 + 25*(l1-x2-[-0.2;0.3])^2
+// // worked out: df/dx1 = 100*[0.1;0.1] + 100*[0.2;-0.1]) + 25*[0.0;0.2] = [10+20;10-10+5] = [30;5]
+// expected.insert(L(1),Vector_(2, 5.0,-12.5));
+// expected.insert(X(1),Vector_(2, 30.0, 5.0));
+// expected.insert(X(2),Vector_(2,-25.0, 17.5));
+//
+// // Check the gradient at delta=0
+// VectorValues zero = createZeroDelta();
+// VectorValues actual = fg.gradient(zero);
+// EXPECT(assert_equal(expected,actual));
+//
+// // Check it numerically for good measure
+// Vector numerical_g = numericalGradient(error,zero,0.001);
+// EXPECT(assert_equal(Vector_(6,5.0,-12.5,30.0,5.0,-25.0,17.5),numerical_g));
+//
+// // Check the gradient at the solution (should be zero)
+// Ordering ord;
+// ord += X(2),L(1),X(1);
+// GaussianFactorGraph fg2 = createGaussianFactorGraph();
+// VectorValues solution = fg2.optimize(ord); // destructive
+// VectorValues actual2 = fg.gradient(solution);
+// EXPECT(assert_equal(zero,actual2));
+//}
+
+///* ************************************************************************* */
+//TEST( GaussianFactorGraph, transposeMultiplication )
+//{
+// // create an ordering
+// Ordering ord; ord += X(2),L(1),X(1);
+//
+// GaussianFactorGraph A = createGaussianFactorGraph(ord);
+// Errors e;
+// e += Vector_(2, 0.0, 0.0);
+// e += Vector_(2,15.0, 0.0);
+// e += Vector_(2, 0.0,-5.0);
+// e += Vector_(2,-7.5,-5.0);
+//
+// VectorValues expected = createZeroDelta(ord), actual = A ^ e;
+// expected[ord[L(1)]] = Vector_(2, -37.5,-50.0);
+// expected[ord[X(1)]] = Vector_(2,-150.0, 25.0);
+// expected[ord[X(2)]] = Vector_(2, 187.5, 25.0);
+// EXPECT(assert_equal(expected,actual));
+//}
+
+///* ************************************************************************* */
+//TEST( GaussianFactorGraph, rhs )
+//{
+// // create an ordering
+// Ordering ord; ord += X(2),L(1),X(1);
+//
+// GaussianFactorGraph Ab = createGaussianFactorGraph(ord);
+// Errors expected = createZeroDelta(ord), actual = Ab.rhs();
+// expected.push_back(Vector_(2,-1.0,-1.0));
+// expected.push_back(Vector_(2, 2.0,-1.0));
+// expected.push_back(Vector_(2, 0.0, 1.0));
+// expected.push_back(Vector_(2,-1.0, 1.5));
+// EXPECT(assert_equal(expected,actual));
+//}
+
+
+/* ************************************************************************* */
+int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+/* ************************************************************************* */
diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp
index b09133c4d..081910e8c 100644
--- a/gtsam/nonlinear/ISAM2.cpp
+++ b/gtsam/nonlinear/ISAM2.cpp
@@ -27,7 +27,7 @@ using namespace boost::assign;
#include
#include