diff --git a/cpp/Makefile.am b/cpp/Makefile.am
index 907f04694..1f9332547 100644
--- a/cpp/Makefile.am
+++ b/cpp/Makefile.am
@@ -101,7 +101,7 @@ testBinaryBayesNet_LDADD = libgtsam.la
 
 # Gaussian inference
 headers += GaussianFactorSet.h
-sources += Errors.cpp VectorConfig.cpp GaussianFactor.cpp GaussianFactorGraph.cpp GaussianConditional.cpp GaussianBayesNet.cpp
+sources += Errors.cpp VectorConfig.cpp GaussianFactor.cpp GaussianFactorGraph.cpp GaussianConditional.cpp GaussianBayesNet.cpp iterative.cpp
 check_PROGRAMS += testVectorConfig testGaussianFactor testGaussianFactorGraph testGaussianConditional testGaussianBayesNet testIterative
 testVectorConfig_SOURCES = testVectorConfig.cpp
 testVectorConfig_LDADD = libgtsam.la
diff --git a/cpp/iterative.cpp b/cpp/iterative.cpp
new file mode 100644
index 000000000..59cd75990
--- /dev/null
+++ b/cpp/iterative.cpp
@@ -0,0 +1,109 @@
+/*
+ * iterative.cpp
+ * @brief Iterative methods, implementation
+ * @author Frank Dellaert
+ * Created on: Dec 28, 2009
+ */
+
+#include "GaussianFactorGraph.h"
+#include "iterative.h"
+
+using namespace std;
+
+namespace gtsam {
+
+  /* ************************************************************************* */
+
+  /**
+   * gradient of objective function 0.5*|Ax-b|^2 at x = A'*(Ax-b)
+   */
+  Vector gradient(const System& Ab, const Vector& x) {
+    const Matrix& A = Ab.first;
+    const Vector& b = Ab.second;
+    return A ^ (A * x - b);
+  }
+
+  /**
+   * Apply operator A
+   */
+  Vector operator*(const System& Ab, const Vector& x) {
+    const Matrix& A = Ab.first;
+    return A * x;
+  }
+
+  /**
+   * Apply operator A^T
+   */
+  Vector operator^(const System& Ab, const Vector& x) {
+    const Matrix& A = Ab.first;
+    return A ^ x;
+  }
+
+  /* ************************************************************************* */
+  // Method of conjugate gradients (CG) template
+  // "System" class S needs gradient(S,v), e=S*v, v=S^e
+  // "Vector" class V needs dot(v,v), -v, v+v, s*v
+  // "Vector" class E needs dot(v,v)
+  template<class S, class V, class E>
+  V CGD(const S& Ab, V x, double threshold = 1e-9) {
+
+    // Start with g0 = A'*(A*x0-b), d0 = - g0
+    // i.e., first step is in direction of negative gradient
+    V g = gradient(Ab, x);
+    V d = -g;
+    double prev_dotg = dot(g, g);
+
+    // loop max n times
+    size_t n = x.size();
+    for (int k = 1; k <= n; k++) {
+
+      // calculate optimal step-size
+      E Ad = Ab * d;
+      double alpha = -dot(d, g) / dot(Ad, Ad);
+
+      // do step in new search direction
+      x = x + alpha * d;
+      if (k == n) break;
+
+      // update gradient
+      g = g + alpha * (Ab ^ Ad);
+
+      // check for convergence
+      double dotg = dot(g, g);
+      if (dotg < threshold) break;
+
+      // calculate new search direction
+      double beta = dotg / prev_dotg;
+      prev_dotg = dotg;
+      d = -g + beta * d;
+    }
+    return x;
+  }
+
+  /* ************************************************************************* */
+  Vector conjugateGradientDescent(const System& Ab, const Vector& x,
+      double threshold) {
+    return CGD<System, Vector, Vector> (Ab, x, threshold);
+  }
+
+  /* ************************************************************************* */
+  Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
+      const Vector& x, double threshold) {
+    System Ab = make_pair(A, b);
+    return CGD<System, Vector, Vector> (Ab, x, threshold);
+  }
+
+  /* ************************************************************************* */
+  VectorConfig gradient(const GaussianFactorGraph& fg, const VectorConfig& x) {
+    return fg.gradient(x);
+  }
+
+  /* ************************************************************************* */
+  VectorConfig conjugateGradientDescent(const GaussianFactorGraph& fg,
+      const VectorConfig& x, double threshold) {
+    return CGD<GaussianFactorGraph, VectorConfig, Errors> (fg, x, threshold);
+  }
+
+/* ************************************************************************* */
+
+} // namespace gtsam
diff --git a/cpp/iterative.h b/cpp/iterative.h
new file mode 100644
index 000000000..e99228f1f
--- /dev/null
+++ b/cpp/iterative.h
@@ -0,0 +1,34 @@
+/*
+ * iterative.h
+ * @brief Iterative methods, declarations
+ * @author Frank Dellaert
+ * Created on: Dec 28, 2009
+ */
+
+#include "Matrix.h"
+namespace gtsam {
+
+  class GaussianFactorGraph;
+  class VectorConfig;
+
+  typedef std::pair<Matrix, Vector> System;
+
+  /**
+   * Method of conjugate gradients (CG), System version
+   */
+  Vector conjugateGradientDescent(const System& Ab, const Vector& x,
+      double threshold = 1e-9);
+
+  /**
+   * Method of conjugate gradients (CG), Matrix version
+   */
+  Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
+      const Vector& x, double threshold = 1e-9);
+
+  /**
+   * Method of conjugate gradients (CG), Gaussian Factor Graph version
+   */
+  VectorConfig conjugateGradientDescent(const GaussianFactorGraph& fg,
+      const VectorConfig& x, double threshold = 1e-9);
+
+} // namespace gtsam
diff --git a/cpp/testIterative.cpp b/cpp/testIterative.cpp
index 0d090e81b..b3b4d1aad 100644
--- a/cpp/testIterative.cpp
+++ b/cpp/testIterative.cpp
@@ -10,102 +10,14 @@ using namespace boost::assign;
 
 #include <CppUnitLite/TestHarness.h>
 #include "Ordering.h"
+#include "iterative.h"
 #include "smallExample.h"
 
 using namespace std;
 using namespace gtsam;
 
 /* ************************************************************************* */
-VectorConfig gradient(const GaussianFactorGraph& Ab, const VectorConfig& x) {
-  return Ab.gradient(x);
-}
-
-/* ************************************************************************* */
-typedef pair<Matrix, Vector> System;
-
-/**
- * gradient of objective function 0.5*|Ax-b|^2 at x = A'*(Ax-b)
- */
-Vector gradient(const System& Ab, const Vector& x) {
-  const Matrix& A = Ab.first;
-  const Vector& b = Ab.second;
-  return A ^ (A * x - b);
-}
-
-/**
- * Apply operator A
- */
-Vector operator*(const System& Ab, const Vector& x) {
-  const Matrix& A = Ab.first;
-  return A * x;
-}
-
-/**
- * Apply operator A^T
- */
-Vector operator^(const System& Ab, const Vector& x) {
-  const Matrix& A = Ab.first;
-  return A ^ x;
-}
-
-/* ************************************************************************* */
-// Method of conjugate gradients (CG)
-// "System" class S needs gradient(S,v), e=S*v, v=S^e
-// "Vector" class V needs dot(v,v), -v, v+v, s*v
-// "Vector" class E needs dot(v,v)
-template<class S, class V, class E>
-V CGD(const S& Ab, V x, double threshold = 1e-9) {
-
-  // Start with g0 = A'*(A*x0-b), d0 = - g0
-  // i.e., first step is in direction of negative gradient
-  V g = gradient(Ab, x);
-  V d = -g;
-  double prev_dotg = dot(g, g);
-
-  // loop max n times
-  size_t n = x.size();
-  for (int k = 1; k <= n; k++) {
-
-    // calculate optimal step-size
-    E Ad = Ab * d;
-    double alpha = -dot(d, g) / dot(Ad, Ad);
-
-    // do step in new search direction
-    x = x + alpha * d;
-    if (k == n) break;
-
-    // update gradient
-    g = g + alpha * (Ab ^ Ad);
-
-    // check for convergence
-    double dotg = dot(g, g);
-    if (dotg < threshold) break;
-
-    // calculate new search direction
-    double beta = dotg / prev_dotg;
-    prev_dotg = dotg;
-    d = -g + beta * d;
-  }
-  return x;
-}
-
-/* ************************************************************************* */
-// Method of conjugate gradients (CG), Matrix version
-Vector conjugateGradientDescent(const Matrix& A, const Vector& b,
-    const Vector& x, double threshold = 1e-9) {
-  System Ab = make_pair(A, b);
-  return CGD<System, Vector, Vector> (Ab, x);
-}
-
-/* ************************************************************************* */
-// Method of conjugate gradients (CG), Gaussian Factor Graph version
-VectorConfig conjugateGradientDescent(const GaussianFactorGraph& Ab,
-    const VectorConfig& x, double threshold = 1e-9) {
-  return CGD<GaussianFactorGraph, VectorConfig, Errors> (Ab, x);
-}
-
-/* ************************************************************************* */
-TEST( GaussianFactorGraph, gradientDescent )
+TEST( Iterative, gradientDescent )
 {
   // Expected solution
   Ordering ord;
@@ -136,7 +48,7 @@ TEST( GaussianFactorGraph, gradientDescent )
 
   // Do conjugate gradient descent, System version
   System Ab = make_pair(A,b);
-  Vector actualX2 = CGD<System,Vector,Vector>(Ab,x0);
+  Vector actualX2 = conjugateGradientDescent(Ab,x0);
   CHECK(assert_equal(expectedX,actualX2,1e-9));
 }
 
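
For reference, a minimal usage sketch of the new dense-matrix overload conjugateGradientDescent(A, b, x0, threshold) follows. It is not part of the patch: the 2x2 system is invented for illustration, and Matrix_, Vector_, zero and print are assumed to be the usual gtsam helpers from Matrix.h/Vector.h.

// Usage sketch only; assumes the gtsam helpers Matrix_, Vector_, zero and print
#include "iterative.h"

using namespace gtsam;

int main() {
  // Small well-conditioned system; the exact minimizer of 0.5*|Ax-b|^2 is x = (1, 2)
  Matrix A = Matrix_(2, 2,
      2.0, 0.0,
      0.0, 3.0);
  Vector b = Vector_(2, 2.0, 6.0);
  Vector x0 = zero(2); // start at the origin

  // CG on an n-dimensional system takes at most n steps,
  // or stops earlier once |gradient|^2 < threshold
  Vector x = conjugateGradientDescent(A, b, x0, 1e-9);
  print(x, "x"); // expect (1, 2)
  return 0;
}

Because CGD caps the iteration count at the problem dimension n, the call above terminates after at most two steps regardless of the threshold; the same bound applies to the System and GaussianFactorGraph overloads.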