diff --git a/examples/Pose2SLAMwSPCG.cpp b/examples/Pose2SLAMwSPCG.cpp
index cec4301dc..984701734 100644
--- a/examples/Pose2SLAMwSPCG.cpp
+++ b/examples/Pose2SLAMwSPCG.cpp
@@ -16,7 +16,6 @@
  * @date June 2, 2012
  */

-#include <gtsam/linear/IterativeOptimizationParameters.h>
 #include <gtsam/slam/pose2SLAM.h>
 #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
new file mode 100644
index 000000000..2cfd1279f
--- /dev/null
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -0,0 +1,89 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+#pragma once
+
+#include <gtsam/linear/IterativeSolver.h>
+
+namespace gtsam {
+
+/**
+ * parameters for the conjugate gradient method
+ */
+struct ConjugateGradientParameters : public IterativeOptimizationParameters {
+
+  typedef IterativeOptimizationParameters Base;
+  typedef boost::shared_ptr<ConjugateGradientParameters> shared_ptr;
+
+  size_t minIterations_;  ///< minimum number of cg iterations
+  size_t maxIterations_;  ///< maximum number of cg iterations
+  size_t reset_;          ///< number of iterations before reset
+  double epsilon_rel_;    ///< threshold for relative error decrease
+  double epsilon_abs_;    ///< threshold for absolute error decrease
+
+  /* Matrix Operation Kernel */
+  enum BLASKernel {
+    GTSAM = 0, ///< Jacobian Factor Graph of GTSAM
+    SBM,       ///< Sparse Block Matrix
+    SBM_MT     ///< Sparse Block Matrix Multithreaded
+  } blas_kernel_;
+
+  enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; /* Verbosity */
+
+  ConjugateGradientParameters()
+  : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3), epsilon_abs_(1e-3),
+    blas_kernel_(GTSAM), verbosity_(SILENT) {}
+
+  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
+      double epsilon_rel, double epsilon_abs, BLASKernel blas = GTSAM, Verbosity verbosity = SILENT)
+  : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
+    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas), verbosity_(verbosity) {}
+
+  /* general interface */
+  inline size_t minIterations() const { return minIterations_; }
+  inline size_t maxIterations() const { return maxIterations_; }
+  inline size_t reset() const { return reset_; }
+  inline double epsilon() const { return epsilon_rel_; }
+  inline double epsilon_rel() const { return epsilon_rel_; }
+  inline double epsilon_abs() const { return epsilon_abs_; }
+  inline BLASKernel blas_kernel() const { return blas_kernel_; }
+  inline Verbosity verbosity() const { return verbosity_; }
+
+  void print() const {
+    const std::string blasStr[3] = {"gtsam", "sbm", "sbm-mt"};
+    Base::print();
+    std::cout << "ConjugateGradientParameters: "
+              << "blas = " << blasStr[blas_kernel_]
+              << ", minIter = " << minIterations_
+              << ", maxIter = " << maxIterations_
+              << ", resetIter = " << reset_
+              << ", eps_rel = " << epsilon_rel_
+              << ", eps_abs = " << epsilon_abs_
+              << ", verbosity = " << verbosity_
+              << std::endl;
+  }
+};
+
+//class ConjugateGradientSolver : public IterativeSolver {
+//
+//public:
+//
+//  typedef ConjugateGradientParameters Parameters;
+//
+//  Parameters parameters_;
+//
+//  ConjugateGradientSolver(const ConjugateGradientParameters &parameters) : parameters_(parameters) {}
+//  virtual VectorValues::shared_ptr optimize () = 0;
+//  virtual const IterativeOptimizationParameters& _params() const = 0;
+//};
+
+
+}
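For orientation, a minimal usage sketch of the new parameters type (hypothetical values, not part of the patch):

    // Loose 1e-2 tolerances, at most 100 CG iterations, reset every 101 iterations.
    gtsam::ConjugateGradientParameters params(1, 100, 101, 1e-2, 1e-2);
    params.print(); // prints the inherited kernel/verbosity line, then the CG settings

Because the struct now derives from IterativeOptimizationParameters, any code that previously held the base class by shared_ptr can carry these CG settings polymorphically.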
diff --git a/gtsam/linear/IterativeOptimizationParameters.h b/gtsam/linear/IterativeOptimizationParameters.h
deleted file mode 100644
index 459f07b3e..000000000
--- a/gtsam/linear/IterativeOptimizationParameters.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-/**
- * @file IterativeOptimizationParameters.h
- * @date Oct 22, 2010
- * @author Yong-Dian Jian
- */
-
-#pragma once
-
-#include <string>
-#include <iostream>
-#include <boost/shared_ptr.hpp>
-
-namespace gtsam {
-
-/**
- * parameters for the conjugate gradient method
- */
-struct ConjugateGradientParameters {
-
-  size_t minIterations_; ///< minimum number of cg iterations
-  size_t maxIterations_; ///< maximum number of cg iterations
-  size_t reset_;         ///< number of iterations before reset
-  double epsilon_rel_;   ///< threshold for relative error decrease
-  double epsilon_abs_;   ///< threshold for absolute error decrease
-
-  /* Matrix Operation Kernel */
-  enum BLASKernel {
-    GTSAM = 0, ///< Jacobian Factor Graph of GTSAM
-    SBM,       ///< Sparse Block Matrix
-    SBM_MT     ///< Sparse Block Matrix Multithreaded
-  } blas_kernel_;
-
-  size_t degree_; ///< the maximum degree of the vertices to be eliminated before doing cg
-
-  enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; /* Verbosity */
-
-  ConjugateGradientParameters()
-  : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3), epsilon_abs_(1e-3),
-    blas_kernel_(GTSAM), degree_(0), verbosity_(SILENT) {}
-
-  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-      double epsilon_rel, double epsilon_abs, BLASKernel blas = GTSAM, size_t degree = 0, Verbosity verbosity = SILENT)
-  : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas), degree_(degree), verbosity_(verbosity) {}
-
-  /* general interface */
-  inline size_t minIterations() const { return minIterations_; }
-  inline size_t maxIterations() const { return maxIterations_; }
-  inline size_t reset() const { return reset_; }
-  inline double epsilon() const { return epsilon_rel_; }
-  inline double epsilon_rel() const { return epsilon_rel_; }
-  inline double epsilon_abs() const { return epsilon_abs_; }
-  inline BLASKernel blas_kernel() const { return blas_kernel_; }
-  inline size_t degree() const { return degree_; }
-  inline Verbosity verbosity() const { return verbosity_; }
-
-  void print() const {
-    const std::string blasStr[3] = {"gtsam", "sbm", "sbm-mt"};
-    std::cout << "ConjugateGradientParameters: "
-              << "blas = " << blasStr[blas_kernel_]
-              << ", minIter = " << minIterations_
-              << ", maxIter = " << maxIterations_
-              << ", resetIter = " << reset_
-              << ", eps_rel = " << epsilon_rel_
-              << ", eps_abs = " << epsilon_abs_
-              << ", degree = " << degree_
-              << ", verbosity = " << verbosity_
-              << std::endl;
-  }
-};
-
-/**
- * parameters for iterative linear solvers
- */
-class IterativeOptimizationParameters {
-
-public:
-
-  typedef boost::shared_ptr<IterativeOptimizationParameters> shared_ptr;
-
-  ConjugateGradientParameters cg_; ///< Parameters for the Conjugate Gradient Method
-  enum Kernel { PCG = 0, LSPCG = 1 } kernel_ ; ///< Iterative Method Kernel
-  enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; ///< Verbosity
-
-public:
-
-  IterativeOptimizationParameters() : cg_(), kernel_(LSPCG), verbosity_(SILENT) {}
-
-  IterativeOptimizationParameters(const IterativeOptimizationParameters &p)
-    : cg_(p.cg_), kernel_(p.kernel_), verbosity_(p.verbosity_) {}
-
-  IterativeOptimizationParameters(const ConjugateGradientParameters &c, Kernel kernel = LSPCG, Verbosity verbosity = SILENT)
-    : cg_(c), kernel_(kernel), verbosity_(verbosity) {}
-
-  virtual ~IterativeOptimizationParameters() {}
-
-  /* general interface */
-  inline Kernel kernel() const { return kernel_; }
-  inline Verbosity verbosity() const { return verbosity_; }
-
-  /* interface to cg parameters */
-  inline const ConjugateGradientParameters& cg() const { return cg_; }
-  inline size_t minIterations() const { return cg_.minIterations(); }
-  inline size_t maxIterations() const { return cg_.maxIterations(); }
-  inline size_t reset() const { return cg_.reset(); }
-  inline double epsilon() const { return cg_.epsilon_rel(); }
-  inline double epsilon_rel() const { return cg_.epsilon_rel(); }
-  inline double epsilon_abs() const { return cg_.epsilon_abs(); }
-  inline size_t degree() const { return cg_.degree(); }
-  inline ConjugateGradientParameters::BLASKernel blas_kernel() const { return cg_.blas_kernel(); }
-
-  virtual void print(const std::string &s="") const {
-    const std::string kernelStr[2] = {"pcg", "lspcg"};
-    std::cout << s << std::endl
-              << "IterativeOptimizationParameters: "
-              << "kernel = " << kernelStr[kernel_]
-              << ", verbosity = " << verbosity_ << std::endl;
-    cg_.print();
-  }
-
-};
-
-}
diff --git a/gtsam/linear/IterativeSolver.h b/gtsam/linear/IterativeSolver.h
index edaa09943..de1c564ef 100644
--- a/gtsam/linear/IterativeSolver.h
+++ b/gtsam/linear/IterativeSolver.h
@@ -11,20 +11,49 @@

 #pragma once

-#include <gtsam/linear/IterativeOptimizationParameters.h>
 #include <gtsam/linear/VectorValues.h>

 namespace gtsam {

-class IterativeSolver {
+  /**
+   * parameters for iterative linear solvers
+   */
+  class IterativeOptimizationParameters {

-public:
+  public:

-  IterativeSolver(){}
-  virtual ~IterativeSolver() {}
+    typedef boost::shared_ptr<IterativeOptimizationParameters> shared_ptr;

-  virtual VectorValues::shared_ptr optimize () = 0;
-  virtual const IterativeOptimizationParameters& _params() const = 0;
-};
+    enum Kernel { PCG = 0, LSPCG = 1 } kernel_ ; ///< Iterative Method Kernel
+    enum Verbosity { SILENT = 0, COMPLEXITY = 1, ERROR = 2} verbosity_ ; ///< Verbosity
+
+  public:
+
+    IterativeOptimizationParameters(const IterativeOptimizationParameters &p)
+      : kernel_(p.kernel_), verbosity_(p.verbosity_) {}
+
+    IterativeOptimizationParameters(Kernel kernel = LSPCG, Verbosity verbosity = SILENT)
+      : kernel_(kernel), verbosity_(verbosity) {}
+
+    virtual ~IterativeOptimizationParameters() {}
+
+    /* general interface */
+    inline Kernel kernel() const { return kernel_; }
+    inline Verbosity verbosity() const { return verbosity_; }
+
+    void print() const {
+      const std::string kernelStr[2] = {"pcg", "lspcg"};
+      std::cout << "IterativeOptimizationParameters: "
+                << "kernel = " << kernelStr[kernel_]
+                << ", verbosity = " << verbosity_ << std::endl;
+    }
+  };
+
+  class IterativeSolver {
+  public:
+    IterativeSolver(){}
+    virtual ~IterativeSolver() {}
+    virtual VectorValues::shared_ptr optimize () = 0;
+  };

 }
diff --git a/gtsam/linear/SimpleSPCGSolver.cpp b/gtsam/linear/SimpleSPCGSolver.cpp
index b08cc0c36..03401cc7f 100644
--- a/gtsam/linear/SimpleSPCGSolver.cpp
+++ b/gtsam/linear/SimpleSPCGSolver.cpp
@@ -9,11 +9,11 @@

 * -------------------------------------------------------------------------- */

-#include <gtsam/linear/IterativeOptimizationParameters.h>
+#include <gtsam/linear/ConjugateGradientSolver.h>
 #include <gtsam/linear/SimpleSPCGSolver.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
-#include <gtsam/linear/IterativeSolver.h>
 #include <gtsam/linear/GaussianBayesNet.h>
+#include <gtsam/linear/VectorValues.h>
 #include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/inference/EliminationTree.h>
 #include <boost/foreach.hpp>
@@ -100,7 +100,7 @@ VectorValues::shared_ptr SimpleSPCGSolver::optimize (const VectorValues &initial
     std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
   const size_t iMaxIterations = parameters_.maxIterations();
-  const ConjugateGradientParameters::Verbosity verbosity = parameters_.cg_.verbosity();
+  const Parameters::Verbosity verbosity = parameters_.verbosity();

   if ( verbosity >= ConjugateGradientParameters::ERROR )
     std::cout << "[SimpleSPCGSolver] epsilon = " << parameters_.epsilon()
diff --git a/gtsam/linear/SimpleSPCGSolver.h b/gtsam/linear/SimpleSPCGSolver.h
index d2768f581..406232d3c 100644
--- a/gtsam/linear/SimpleSPCGSolver.h
+++ b/gtsam/linear/SimpleSPCGSolver.h
@@ -11,9 +11,10 @@

 #pragma once

-#include <gtsam/linear/IterativeOptimizationParameters.h>
+#include <gtsam/linear/IterativeSolver.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/GaussianBayesNet.h>
+#include <gtsam/linear/ConjugateGradientSolver.h>
 #include <gtsam/linear/VectorValues.h>

 namespace gtsam {
@@ -39,7 +40,7 @@ class SimpleSPCGSolver : public IterativeSolver {
 public:

   typedef IterativeSolver Base;
-  typedef IterativeOptimizationParameters Parameters;
+  typedef ConjugateGradientParameters Parameters;
   typedef boost::shared_ptr<SimpleSPCGSolver> shared_ptr;

 protected:
@@ -60,7 +61,6 @@ public:
   SimpleSPCGSolver(const GaussianFactorGraph &gfg, const Parameters &parameters);
   virtual ~SimpleSPCGSolver() {}
   virtual VectorValues::shared_ptr optimize () {return optimize(*y0_);}
-  virtual const IterativeOptimizationParameters& _params() const { return parameters_; }

 protected:
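With the Parameters typedef switched to ConjugateGradientParameters, a caller drives the solver like this (a sketch; `gfg` is an assumed, already-linearized gtsam::GaussianFactorGraph):

    gtsam::SimpleSPCGSolver::Parameters params;       // now a ConjugateGradientParameters
    gtsam::SimpleSPCGSolver solver(gfg, params);
    gtsam::VectorValues::shared_ptr delta = solver.optimize();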
diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp
index cce8eea9d..59a72dd8b 100644
--- a/gtsam/linear/SubgraphPreconditioner.cpp
+++ b/gtsam/linear/SubgraphPreconditioner.cpp
@@ -1,150 +1,146 @@
-///* ----------------------------------------------------------------------------
-//
-// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
-// * Atlanta, Georgia 30332-0415
-// * All Rights Reserved
-// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-//
-// * See LICENSE for the license information
-//
-// * -------------------------------------------------------------------------- */
-//
-///**
-// * @file SubgraphPreconditioner.cpp
-// * @date Dec 31, 2009
-// * @author: Frank Dellaert
-// */
-//
-//#include <gtsam/linear/SubgraphPreconditioner.h>
-//#include <gtsam/linear/GaussianBayesNet.h>
-//#include <gtsam/linear/VectorValues.h>
-//#include <boost/foreach.hpp>
-//
-//using namespace std;
-//
-//namespace gtsam {
-//
-//  /* ************************************************************************* */
-//  SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2,
-//      const sharedBayesNet& Rc1, const sharedValues& xbar) :
-//    Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(gaussianErrors_(*Ab2_,*xbar)) {
-//  }
-//
-//  /* ************************************************************************* */
-//  // x = xbar + inv(R1)*y
-//  VectorValues SubgraphPreconditioner::x(const VectorValues& y) const {
-//#ifdef VECTORBTREE
-//    if (!y.cloned(*xbar_)) throw
-//      invalid_argument("SubgraphPreconditioner::x: y needs to be cloned from xbar");
-//#endif
-//    VectorValues x = y;
-//    backSubstituteInPlace(*Rc1_,x);
-//    x += *xbar_;
-//    return x;
-//  }
-//
-////  SubgraphPreconditioner SubgraphPreconditioner::add_priors(double sigma) const {
-////    SubgraphPreconditioner result = *this ;
-////    result.Ab2_ = sharedFG(new GaussianFactorGraph(Ab2_->add_priors(sigma))) ;
-////    return result ;
-////  }
-//
-//  /* ************************************************************************* */
-//  double error(const SubgraphPreconditioner& sp, const VectorValues& y) {
-//
-//    Errors e(y);
-//    VectorValues x = sp.x(y);
-//    Errors e2 = gaussianErrors(*sp.Ab2(),x);
-//    return 0.5 * (dot(e, e) + dot(e2,e2));
-//  }
-//
-//  /* ************************************************************************* */
-//  // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
-//  VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) {
-//    VectorValues x = sp.x(y); // x = inv(R1)*y
-//    Errors e2 = gaussianErrors(*sp.Ab2(),x);
-//    VectorValues gx2 = VectorValues::Zero(y);
-//    gtsam::transposeMultiplyAdd(*sp.Ab2(),1.0,e2,gx2); // A2'*e2;
-//    VectorValues gy2 = gtsam::backSubstituteTranspose(*sp.Rc1(), gx2); // inv(R1')*gx2
-//    return y + gy2;
-//  }
-//
-//  /* ************************************************************************* */
-//  // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
-//  Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) {
-//
-//    Errors e(y);
-//
-//    // Add A2 contribution
-//    VectorValues x = y; // TODO avoid ?
-//    gtsam::backSubstituteInPlace(*sp.Rc1(), x); // x=inv(R1)*y
-//    Errors e2 = *sp.Ab2() * x; // A2*x
-//    e.splice(e.end(), e2);
-//
-//    return e;
-//  }
-//
-//  /* ************************************************************************* */
-//  // In-place version that overwrites e
-//  void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) {
-//
-//    Errors::iterator ei = e.begin();
-//    for ( Index i = 0 ; i < y.size() ; ++i, ++ei ) {
-//      *ei = y[i];
-//    }
-//
-//    // Add A2 contribution
-//    VectorValues x = y; // TODO avoid ?
-//    gtsam::backSubstituteInPlace(*sp.Rc1(), x); // x=inv(R1)*y
-//    gtsam::multiplyInPlace(*sp.Ab2(),x,ei); // use iterator version
-//  }
-//
-//  /* ************************************************************************* */
-//  // Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2
-//  VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e) {
-//
-//    Errors::const_iterator it = e.begin();
-//    VectorValues y = sp.zero();
-//    for ( Index i = 0 ; i < y.size() ; ++i, ++it )
-//      y[i] = *it ;
-//    sp.transposeMultiplyAdd2(1.0,it,e.end(),y);
-//    return y;
-//  }
-//
-//  /* ************************************************************************* */
-//  // y += alpha*A'*e
-//  void transposeMultiplyAdd
-//    (const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) {
-//
-//    Errors::const_iterator it = e.begin();
-//    for ( Index i = 0 ; i < y.size() ; ++i, ++it ) {
-//      const Vector& ei = *it;
-//      axpy(alpha,ei,y[i]);
-//    }
-//    sp.transposeMultiplyAdd2(alpha,it,e.end(),y);
-//  }
-//
-//  /* ************************************************************************* */
-//  // y += alpha*inv(R1')*A2'*e2
-//  void SubgraphPreconditioner::transposeMultiplyAdd2 (double alpha,
-//    Errors::const_iterator it, Errors::const_iterator end, VectorValues& y) const {
-//
-//    // create e2 with what's left of e
-//    // TODO can we avoid creating e2 by passing iterator to transposeMultiplyAdd ?
-//    Errors e2;
-//    while (it != end)
-//      e2.push_back(*(it++));
-//
-//    VectorValues x = VectorValues::Zero(y); // x = 0
-//    gtsam::transposeMultiplyAdd(*Ab2_,1.0,e2,x); // x += A2'*e2
-//    axpy(alpha, gtsam::backSubstituteTranspose(*Rc1_, x), y); // y += alpha*inv(R1')*x
-//  }
-//
-//  /* ************************************************************************* */
-//  void SubgraphPreconditioner::print(const std::string& s) const {
-//    cout << s << endl;
-//    Ab2_->print();
-//  }
-//} // nsamespace gtsam
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file SubgraphPreconditioner.cpp
+ * @date Dec 31, 2009
+ * @author: Frank Dellaert
+ */
+
+#include <gtsam/linear/SubgraphPreconditioner.h>
+#include <gtsam/linear/GaussianBayesNet.h>
+#include <gtsam/linear/JacobianFactorGraph.h>
+
+#include <boost/foreach.hpp>
+
+using namespace std;
+
+namespace gtsam {
+
+  /* ************************************************************************* */
+  SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2,
+      const sharedBayesNet& Rc1, const sharedValues& xbar) :
+    Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(gaussianErrors_(*Ab2_,*xbar)) {
+  }
+
+  /* ************************************************************************* */
+  // x = xbar + inv(R1)*y
+  VectorValues SubgraphPreconditioner::x(const VectorValues& y) const {
+    VectorValues x = y;
+    optimizeInPlace(*Rc1_,x);
+    x += *xbar_;
+    return x;
+  }
+
+//  SubgraphPreconditioner SubgraphPreconditioner::add_priors(double sigma) const {
+//    SubgraphPreconditioner result = *this ;
+//    result.Ab2_ = sharedFG(new GaussianFactorGraph(Ab2_->add_priors(sigma))) ;
+//    return result ;
+//  }
+
+  /* ************************************************************************* */
+  double error(const SubgraphPreconditioner& sp, const VectorValues& y) {
+
+    Errors e(y);
+    VectorValues x = sp.x(y);
+    Errors e2 = gaussianErrors(*sp.Ab2(),x);
+    return 0.5 * (dot(e, e) + dot(e2,e2));
+  }
+
+  /* ************************************************************************* */
+  // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
+  VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) {
+    VectorValues x = sp.x(y); // x = inv(R1)*y
+    Errors e2 = gaussianErrors(*sp.Ab2(),x);
+    VectorValues gx2 = VectorValues::Zero(y);
+    gtsam::transposeMultiplyAdd(*sp.Ab2(),1.0,e2,gx2); // A2'*e2;
+    VectorValues gy2 = gtsam::backSubstituteTranspose(*sp.Rc1(), gx2); // inv(R1')*gx2
+    return y + gy2;
+  }
+
+  /* ************************************************************************* */
+  // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
+  Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) {
+
+    Errors e(y);
+
+    // Add A2 contribution
+    VectorValues x = y; // TODO avoid ?
+    gtsam::optimizeInPlace(*sp.Rc1(), x); // x=inv(R1)*y
+    Errors e2 = *sp.Ab2() * x; // A2*x
+    e.splice(e.end(), e2);
+
+    return e;
+  }
+
+  /* ************************************************************************* */
+  // In-place version that overwrites e
+  void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) {
+
+    Errors::iterator ei = e.begin();
+    for ( Index i = 0 ; i < y.size() ; ++i, ++ei ) {
+      *ei = y[i];
+    }
+
+    // Add A2 contribution
+    VectorValues x = y; // TODO avoid ?
+    gtsam::optimizeInPlace(*sp.Rc1(), x); // x=inv(R1)*y
+    gtsam::multiplyInPlace(*sp.Ab2(),x,ei); // use iterator version
+  }
+
+  /* ************************************************************************* */
+  // Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2
+  VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e) {
+
+    Errors::const_iterator it = e.begin();
+    VectorValues y = sp.zero();
+    for ( Index i = 0 ; i < y.size() ; ++i, ++it )
+      y[i] = *it ;
+    sp.transposeMultiplyAdd2(1.0,it,e.end(),y);
+    return y;
+  }
+
+  /* ************************************************************************* */
+  // y += alpha*A'*e
+  void transposeMultiplyAdd
+    (const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) {
+
+    Errors::const_iterator it = e.begin();
+    for ( Index i = 0 ; i < y.size() ; ++i, ++it ) {
+      const Vector& ei = *it;
+      axpy(alpha,ei,y[i]);
+    }
+    sp.transposeMultiplyAdd2(alpha,it,e.end(),y);
+  }
+
+  /* ************************************************************************* */
+  // y += alpha*inv(R1')*A2'*e2
+  void SubgraphPreconditioner::transposeMultiplyAdd2 (double alpha,
+    Errors::const_iterator it, Errors::const_iterator end, VectorValues& y) const {
+
+    // create e2 with what's left of e
+    // TODO can we avoid creating e2 by passing iterator to transposeMultiplyAdd ?
+    Errors e2;
+    while (it != end) e2.push_back(*(it++));
+
+    VectorValues x = VectorValues::Zero(y); // x = 0
+    gtsam::transposeMultiplyAdd(*Ab2_,1.0,e2,x); // x += A2'*e2
+    axpy(alpha, gtsam::backSubstituteTranspose(*Rc1_, x), y); // y += alpha*inv(R1')*x
+  }
+
+  /* ************************************************************************* */
+  void SubgraphPreconditioner::print(const std::string& s) const {
+    cout << s << endl;
+    Ab2_->print();
+  }
+} // namespace gtsam
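A quick sanity check of the operators above, evaluated at y = 0 where x(y) reduces to xbar (a sketch; `sp` is an assumed SubgraphPreconditioner instance):

    gtsam::VectorValues y = sp.zero();  // zero vector with the structure of xbar
    double e0 = gtsam::error(sp, y);    // 0.5*||b2bar||^2: only the A2 half contributes
    gtsam::Errors Ay = sp * y;          // stacks [y; A2*inv(R1)*y], all zeros here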
diff --git a/gtsam/linear/SubgraphPreconditioner.h b/gtsam/linear/SubgraphPreconditioner.h
index 2cc760bfa..460f28e5a 100644
--- a/gtsam/linear/SubgraphPreconditioner.h
+++ b/gtsam/linear/SubgraphPreconditioner.h
@@ -1,116 +1,117 @@
-///* ----------------------------------------------------------------------------
-//
-// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
-// * Atlanta, Georgia 30332-0415
-// * All Rights Reserved
-// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-//
-// * See LICENSE for the license information
-//
-// * -------------------------------------------------------------------------- */
-//
-///**
-// * @file SubgraphPreconditioner.h
-// * @date Dec 31, 2009
-// * @author Frank Dellaert
-// */
-//
-//#pragma once
-//
-//#include <gtsam/linear/GaussianBayesNet.h>
-//#include <gtsam/linear/Errors.h>
-//#include <gtsam/nonlinear/Ordering.h> // FIXME shouldn't have nonlinear things in linear
-//
-//namespace gtsam {
-//
-//  /**
-//   * Subgraph conditioner class, as explained in the RSS 2010 submission.
-//   * Starting with a graph A*x=b, we split it in two systems A1*x=b1 and A2*x=b2
-//   * We solve R1*x=c1, and make the substitution y=R1*x-c1.
-//   * To use the class, give the Bayes Net R1*x=c1 and Graph A2*x=b2.
-//   * Then solve for yhat using CG, and solve for xhat = system.x(yhat).
-//   */
-//  class SubgraphPreconditioner {
-//
-//  public:
-//    typedef boost::shared_ptr<GaussianBayesNet> sharedBayesNet;
-//    typedef boost::shared_ptr<FactorGraph<JacobianFactor> > sharedFG;
-//    typedef boost::shared_ptr<VectorValues> sharedValues;
-//    typedef boost::shared_ptr<Errors> sharedErrors;
-//
-//  private:
-//    sharedFG Ab1_, Ab2_;
-//    sharedBayesNet Rc1_;
-//    sharedValues xbar_;
-//    sharedErrors b2bar_; /** b2 - A2*xbar */
-//
-//  public:
-//
-//    SubgraphPreconditioner();
-//    /**
-//     * Constructor
-//     * @param Ab1: the Graph A1*x=b1
-//     * @param Ab2: the Graph A2*x=b2
-//     * @param Rc1: the Bayes Net R1*x=c1
-//     * @param xbar: the solution to R1*x=c1
-//     */
-//    SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar);
-//
-//    /** Access Ab1 */
-//    const sharedFG& Ab1() const { return Ab1_; }
-//
-//    /** Access Ab2 */
-//    const sharedFG& Ab2() const { return Ab2_; }
-//
-//    /** Access Rc1 */
-//    const sharedBayesNet& Rc1() const { return Rc1_; }
-//
-//    /**
-//     * Add zero-mean i.i.d. Gaussian prior terms to each variable
-//     * @param sigma Standard deviation of Gaussian
-//     */
-////    SubgraphPreconditioner add_priors(double sigma) const;
-//
-//    /* x = xbar + inv(R1)*y */
-//    VectorValues x(const VectorValues& y) const;
-//
-//    /* A zero VectorValues with the structure of xbar */
-//    VectorValues zero() const {
-//      VectorValues V(VectorValues::Zero(*xbar_)) ;
-//      return V ;
-//    }
-//
-//    /**
-//     * Add constraint part of the error only, used in both calls above
-//     * y += alpha*inv(R1')*A2'*e2
-//     * Takes a range indicating e2 !!!!
-//     */
-//    void transposeMultiplyAdd2(double alpha, Errors::const_iterator begin,
-//      Errors::const_iterator end, VectorValues& y) const;
-//
-//    /** print the object */
-//    void print(const std::string& s = "SubgraphPreconditioner") const;
-//  };
-//
-//  /* error, given y */
-//  double error(const SubgraphPreconditioner& sp, const VectorValues& y);
-//
-//  /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
-//  VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y);
-//
-//  /** Apply operator A */
-//  Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y);
-//
-//  /** Apply operator A in place: needs e allocated already */
-//  void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e);
-//
-//  /** Apply operator A' */
-//  VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e);
-//
-//  /**
-//   * Add A'*e to y
-//   * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2]
-//   */
-//  void transposeMultiplyAdd(const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y);
-//
-//} // namespace gtsam
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file SubgraphPreconditioner.h
+ * @date Dec 31, 2009
+ * @author Frank Dellaert
+ */
+
+#pragma once
+
+#include <gtsam/linear/GaussianBayesNet.h>
+#include <gtsam/linear/JacobianFactorGraph.h>
+
+namespace gtsam {
+
+  /**
+   * Subgraph conditioner class, as explained in the RSS 2010 submission.
+   * Starting with a graph A*x=b, we split it in two systems A1*x=b1 and A2*x=b2
+   * We solve R1*x=c1, and make the substitution y=R1*x-c1.
+   * To use the class, give the Bayes Net R1*x=c1 and Graph A2*x=b2.
+   * Then solve for yhat using CG, and solve for xhat = system.x(yhat).
+   */
+  class SubgraphPreconditioner {
+
+  public:
+
+    typedef boost::shared_ptr<SubgraphPreconditioner> shared_ptr;
+    typedef boost::shared_ptr<GaussianBayesNet> sharedBayesNet;
+    typedef boost::shared_ptr<FactorGraph<JacobianFactor> > sharedFG;
+    typedef boost::shared_ptr<VectorValues> sharedValues;
+    typedef boost::shared_ptr<Errors> sharedErrors;
+
+  private:
+    sharedFG Ab1_, Ab2_;
+    sharedBayesNet Rc1_;
+    sharedValues xbar_;
+    sharedErrors b2bar_; /** b2 - A2*xbar */
+
+  public:
+
+    SubgraphPreconditioner();
+    /**
+     * Constructor
+     * @param Ab1: the Graph A1*x=b1
+     * @param Ab2: the Graph A2*x=b2
+     * @param Rc1: the Bayes Net R1*x=c1
+     * @param xbar: the solution to R1*x=c1
+     */
+    SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar);
+
+    /** Access Ab1 */
+    const sharedFG& Ab1() const { return Ab1_; }
+
+    /** Access Ab2 */
+    const sharedFG& Ab2() const { return Ab2_; }
+
+    /** Access Rc1 */
+    const sharedBayesNet& Rc1() const { return Rc1_; }
+
+    /**
+     * Add zero-mean i.i.d. Gaussian prior terms to each variable
+     * @param sigma Standard deviation of Gaussian
+     */
+//    SubgraphPreconditioner add_priors(double sigma) const;
+
+    /* x = xbar + inv(R1)*y */
+    VectorValues x(const VectorValues& y) const;
+
+    /* A zero VectorValues with the structure of xbar */
+    VectorValues zero() const {
+      VectorValues V(VectorValues::Zero(*xbar_)) ;
+      return V ;
+    }
+
+    /**
+     * Add constraint part of the error only, used in both calls above
+     * y += alpha*inv(R1')*A2'*e2
+     * Takes a range indicating e2 !!!!
+     */
+    void transposeMultiplyAdd2(double alpha, Errors::const_iterator begin,
+      Errors::const_iterator end, VectorValues& y) const;
+
+    /** print the object */
+    void print(const std::string& s = "SubgraphPreconditioner") const;
+  };
+
+  /* error, given y */
+  double error(const SubgraphPreconditioner& sp, const VectorValues& y);
+
+  /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
+  VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y);
+
+  /** Apply operator A */
+  Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y);
+
+  /** Apply operator A in place: needs e allocated already */
+  void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e);
+
+  /** Apply operator A' */
+  VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e);
+
+  /**
+   * Add A'*e to y
+   * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2]
+   */
+  void transposeMultiplyAdd(const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y);
+
+} // namespace gtsam
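Assembling a preconditioner by hand mirrors what the rewritten SubgraphSolver.cpp does further below (a sketch; `Ab1`/`Ab2` are the assumed tree/constraint halves of a split GaussianFactorGraph):

    GaussianBayesNet::shared_ptr Rc1 =
        EliminationTree<GaussianFactor>::Create(*Ab1)->eliminate(&EliminateQR);
    VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
    SubgraphPreconditioner sp(
        Ab1->dynamicCastFactors<FactorGraph<JacobianFactor> >(),
        Ab2->dynamicCastFactors<FactorGraph<JacobianFactor> >(), Rc1, xbar);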
diff --git a/gtsam/linear/SubgraphSolver-inl.h b/gtsam/linear/SubgraphSolver-inl.h
index d2dc7cee2..28b2caaac 100644
--- a/gtsam/linear/SubgraphSolver-inl.h
+++ b/gtsam/linear/SubgraphSolver-inl.h
@@ -1,82 +1,80 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-#pragma once
-
-#include <boost/foreach.hpp>
-
-#include <gtsam/inference/EliminationTree.h>
-#include <gtsam/inference/graph-inl.h>
-#include <gtsam/linear/iterative-inl.h>
-
-using namespace std;
-
-namespace gtsam {
-
-template<class GRAPH, class LINEAR, class VALUES>
-void SubgraphSolver<GRAPH,LINEAR,VALUES>::replaceFactors(const typename LINEAR::shared_ptr &graph) {
-
-  GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>();
-  GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared<GaussianFactorGraph>();
-
-  if (parameters_->verbosity()) cout << "split the graph ...";
-  split(pairs_, *graph, *Ab1, *Ab2) ;
-  if (parameters_->verbosity()) cout << ",with " << Ab1->size() << " and " << Ab2->size() << " factors" << endl;
-
-  //  // Add a HardConstraint to the root, otherwise the root will be singular
-  //  Key root = keys.back();
-  //  T_.addHardConstraint(root, theta0[root]);
-  //
-  //  // compose the approximate solution
-  //  theta_bar_ = composePoses<GRAPH, Constraint, Pose, Values> (T_, tree, theta0[root]);
-
-  LINEAR sacrificialAb1 = *Ab1; // duplicate !!!!!
-  SubgraphPreconditioner::sharedBayesNet Rc1 =
-    EliminationTree<GaussianFactor>::Create(sacrificialAb1)->eliminate(&EliminateQR);
-  SubgraphPreconditioner::sharedValues xbar = gtsam::optimize_(*Rc1);
-
-  pc_ = boost::make_shared<SubgraphPreconditioner>(
-    Ab1->dynamicCastFactors<FactorGraph<JacobianFactor> >(), Ab2->dynamicCastFactors<FactorGraph<JacobianFactor> >(),Rc1,xbar);
-}
-
-template<class GRAPH, class LINEAR, class VALUES>
-VectorValues::shared_ptr SubgraphSolver<GRAPH,LINEAR,VALUES>::optimize() const {
-
-  // preconditioned conjugate gradient
-  VectorValues zeros = pc_->zero();
-  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors>
-    (*pc_, zeros, *parameters_);
-
-  boost::shared_ptr<VectorValues> xbar = boost::make_shared<VectorValues>() ;
-  *xbar = pc_->x(ybar);
-  return xbar;
-}
-
-template<class GRAPH, class LINEAR, class VALUES>
-void SubgraphSolver<GRAPH,LINEAR,VALUES>::initialize(const GRAPH& G, const Values& theta0) {
-  // generate spanning tree
-  PredecessorMap<Key> tree_ = gtsam::findMinimumSpanningTree<GRAPH, Key, Constraint>(G);
-
-  // make the ordering
-  list<Key> keys = predecessorMap2Keys(tree_);
-  ordering_ = boost::make_shared<Ordering>(list<Symbol>(keys.begin(), keys.end()));
-
-  // build factor pairs
-  pairs_.clear();
-  typedef pair<Key,Key> EG ;
-  BOOST_FOREACH( const EG &eg, tree_ ) {
-    Key key1 = Key(eg.first),
-        key2 = Key(eg.second) ;
-    pairs_.insert(pair<Index, Index>((*ordering_)[key1], (*ordering_)[key2])) ;
-  }
-}
-
-} // \namespace gtsam
+///* ----------------------------------------------------------------------------
+//
+// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+// * Atlanta, Georgia 30332-0415
+// * All Rights Reserved
+// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+//
+// * See LICENSE for the license information
+//
+// * -------------------------------------------------------------------------- */
+//
+//#pragma once
+//
+//#include <boost/foreach.hpp>
+//
+//#include <gtsam/inference/EliminationTree.h>
+//#include <gtsam/inference/graph-inl.h>
+//#include <gtsam/linear/iterative-inl.h>
+//
+//namespace gtsam {
+//
+//template<class GRAPH, class LINEAR, class VALUES>
+//void SubgraphSolver<GRAPH,LINEAR,VALUES>::replaceFactors(const typename LINEAR::shared_ptr &graph) {
+//
+//  GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>();
+//  GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared<GaussianFactorGraph>();
+//
+//  if (parameters_->verbosity()) cout << "split the graph ...";
+//  split(pairs_, *graph, *Ab1, *Ab2) ;
+//  if (parameters_->verbosity()) cout << ",with " << Ab1->size() << " and " << Ab2->size() << " factors" << endl;
+//
+//  //  // Add a HardConstraint to the root, otherwise the root will be singular
+//  //  Key root = keys.back();
+//  //  T_.addHardConstraint(root, theta0[root]);
+//  //
+//  //  // compose the approximate solution
+//  //  theta_bar_ = composePoses<GRAPH, Constraint, Pose, Values> (T_, tree, theta0[root]);
+//
+//  LINEAR sacrificialAb1 = *Ab1; // duplicate !!!!!
+//  SubgraphPreconditioner::sharedBayesNet Rc1 =
+//    EliminationTree<GaussianFactor>::Create(sacrificialAb1)->eliminate(&EliminateQR);
+//  SubgraphPreconditioner::sharedValues xbar = gtsam::optimize_(*Rc1);
+//
+//  pc_ = boost::make_shared<SubgraphPreconditioner>(
+//    Ab1->dynamicCastFactors<FactorGraph<JacobianFactor> >(), Ab2->dynamicCastFactors<FactorGraph<JacobianFactor> >(),Rc1,xbar);
+//}
+//
+//template<class GRAPH, class LINEAR, class VALUES>
+//VectorValues::shared_ptr SubgraphSolver<GRAPH,LINEAR,VALUES>::optimize() const {
+//
+//  // preconditioned conjugate gradient
+//  VectorValues zeros = pc_->zero();
+//  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors>
+//    (*pc_, zeros, *parameters_);
+//
+//  boost::shared_ptr<VectorValues> xbar = boost::make_shared<VectorValues>() ;
+//  *xbar = pc_->x(ybar);
+//  return xbar;
+//}
+//
+//template<class GRAPH, class LINEAR, class VALUES>
+//void SubgraphSolver<GRAPH,LINEAR,VALUES>::initialize(const GRAPH& G, const Values& theta0) {
+//  // generate spanning tree
+//  PredecessorMap<Key> tree_ = gtsam::findMinimumSpanningTree<GRAPH, Key, Constraint>(G);
+//
+//  // make the ordering
+//  list<Key> keys = predecessorMap2Keys(tree_);
+//  ordering_ = boost::make_shared<Ordering>(list<Symbol>(keys.begin(), keys.end()));
+//
+//  // build factor pairs
+//  pairs_.clear();
+//  typedef pair<Key,Key> EG ;
+//  BOOST_FOREACH( const EG &eg, tree_ ) {
+//    Key key1 = Key(eg.first),
+//        key2 = Key(eg.second) ;
+//    pairs_.insert(pair<Index, Index>((*ordering_)[key1], (*ordering_)[key2])) ;
+//  }
+//}
+//
+//} // \namespace gtsam
diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp
index 609b427f2..5d6237d9c 100644
--- a/gtsam/linear/SubgraphSolver.cpp
+++ b/gtsam/linear/SubgraphSolver.cpp
@@ -1,50 +1,108 @@
-///* ----------------------------------------------------------------------------
-//
-// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
-// * Atlanta, Georgia 30332-0415
-// * All Rights Reserved
-// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-//
-// * See LICENSE for the license information
-//
-// * -------------------------------------------------------------------------- */
-//
-//#include <gtsam/linear/SubgraphSolver.h>
-//
-//using namespace std;
-//
-//namespace gtsam {
-//
-///* split the gaussian factor graph Ab into Ab1 and Ab2 according to the map */
-//bool split(const std::map<Index, Index> &M,
-//    const GaussianFactorGraph &Ab,
-//    GaussianFactorGraph &Ab1,
-//    GaussianFactorGraph &Ab2) {
-//
-//  Ab1 = GaussianFactorGraph();
-//  Ab2 = GaussianFactorGraph();
-//
-//  for ( size_t i = 0 ; i < Ab.size() ; ++i ) {
-//
-//    boost::shared_ptr<GaussianFactor> factor = Ab[i] ;
-//
-//    if (factor->keys().size() > 2)
-//      throw(invalid_argument("split: only support factors with at most two keys"));
-//    if (factor->keys().size() == 1) {
-//      Ab1.push_back(factor);
-//      Ab2.push_back(factor);
-//      continue;
-//    }
-//    Index key1 = factor->keys()[0];
-//    Index key2 = factor->keys()[1];
-//
-//    if ((M.find(key1) != M.end() && M.find(key1)->second == key2) ||
-//        (M.find(key2) != M.end() && M.find(key2)->second == key1))
-//      Ab1.push_back(factor);
-//    else
-//      Ab2.push_back(factor);
-//  }
-//  return true ;
-//}
-//
-//} // \namespace gtsam
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+#include <gtsam/linear/SubgraphSolver.h>
+#include <gtsam/linear/SubgraphPreconditioner.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/GaussianBayesNet.h>
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/VectorValues.h>
+#include <gtsam/linear/Errors.h>
+#include <gtsam/inference/EliminationTree.h>
+#include <gtsam/inference/VariableIndex.h>
+
+#include <gtsam_unstable/linear/iterative-inl.h>
+
+#include <boost/foreach.hpp>
+#include <boost/tuple/tuple.hpp>
+
+#include <list>
+
+using namespace std;
+
+namespace gtsam {
+
+SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters)
+  : parameters_(parameters)
+{
+
+  GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>();
+  GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared<GaussianFactorGraph>();
+
+  boost::tie(Ab1, Ab2) = splitGraph(gfg) ;
+
+  if (parameters_.verbosity())
+    cout << ",with " << Ab1->size() << " and " << Ab2->size() << " factors" << endl;
+
+  //  // Add a HardConstraint to the root, otherwise the root will be singular
+  //  Key root = keys.back();
+  //  T_.addHardConstraint(root, theta0[root]);
+  //
+  //  // compose the approximate solution
+  //  theta_bar_ = composePoses (T_, tree, theta0[root]);
+
+  GaussianBayesNet::shared_ptr Rc1 = EliminationTree<GaussianFactor>::Create(*Ab1)->eliminate(&EliminateQR);
+  VectorValues::shared_ptr xbar(new VectorValues(gtsam::optimize(*Rc1)));
+
+  pc_ = boost::make_shared<SubgraphPreconditioner>(
+    Ab1->dynamicCastFactors<FactorGraph<JacobianFactor> >(),
+    Ab2->dynamicCastFactors<FactorGraph<JacobianFactor> >(),
+    Rc1, xbar);
+}
+
+VectorValues::shared_ptr SubgraphSolver::optimize() {
+
+  // preconditioned conjugate gradient
+  VectorValues zeros = pc_->zero();
+  VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, Errors> (*pc_, zeros, parameters_);
+
+  boost::shared_ptr<VectorValues> xbar = boost::make_shared<VectorValues>() ;
+  *xbar = pc_->x(ybar);
+  return xbar;
+}
+
+boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>
+SubgraphSolver::splitGraph(const GaussianFactorGraph &gfg) {
+
+  VariableIndex index(gfg);
+  size_t n = index.size();
+  std::vector<bool> connected(n, false);
+
+  GaussianFactorGraph::shared_ptr At(new GaussianFactorGraph());
+  GaussianFactorGraph::shared_ptr Ac( new GaussianFactorGraph());
+
+  BOOST_FOREACH ( const GaussianFactor::shared_ptr &gf, gfg ) {
+
+    bool augment = false ;
+
+    /* check whether this factor should be augmented to the "tree" graph */
+    if ( gf->keys().size() == 1 ) augment = true;
+    else {
+      BOOST_FOREACH ( const Index key, *gf ) {
+        if ( connected[key] == false ) {
+          augment = true ;
+          connected[key] = true;
+        }
+      }
+    }
+
+    if ( augment ) At->push_back(gf);
+    else Ac->push_back(gf);
+  }
+
+  return boost::tie(At, Ac);
+}
+
+} // \namespace gtsam
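To see what splitGraph() produces, trace it on a hypothetical 3-pose chain with one loop closure; the greedy scan keeps the first factor that touches each still-unconnected variable:

    // factors: prior(x0), odom(x0,x1), odom(x1,x2), loop(x0,x2)
    // prior(x0)   -> At  (unary factors always go to the tree half)
    // odom(x0,x1) -> At  (x0 and x1 not yet connected)
    // odom(x1,x2) -> At  (x2 not yet connected)
    // loop(x0,x2) -> Ac  (both endpoints already connected)
    gtsam::SubgraphSolver solver(gfg, gtsam::SubgraphSolver::Parameters());
    gtsam::VectorValues::shared_ptr xbar = solver.optimize();

At is eliminated into the Bayes net Rc1 and used to precondition CG over the remaining constraints Ac.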
diff --git a/gtsam/linear/SubgraphSolver.h b/gtsam/linear/SubgraphSolver.h
index f4fa34427..111f85cad 100644
--- a/gtsam/linear/SubgraphSolver.h
+++ b/gtsam/linear/SubgraphSolver.h
@@ -1,100 +1,52 @@
-///* ----------------------------------------------------------------------------
-//
-// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
-// * Atlanta, Georgia 30332-0415
-// * All Rights Reserved
-// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-//
-// * See LICENSE for the license information
-//
-// * -------------------------------------------------------------------------- */
-//
-//#pragma once
-//
-//#include <gtsam/linear/IterativeOptimizationParameters.h>
-//
-//#include <gtsam/linear/IterativeSolver.h>
-//#include <gtsam/linear/SubgraphPreconditioner.h>
-//#include <gtsam/nonlinear/Ordering.h>
-//
-//namespace gtsam {
-//
-///* split the gaussian factor graph Ab into Ab1 and Ab2 according to the map */
-//bool split(const std::map<Index, Index> &M,
-//    const GaussianFactorGraph &Ab,
-//    GaussianFactorGraph &Ab1,
-//    GaussianFactorGraph &Ab2);
-//
-///**
-// * A nonlinear system solver using subgraph preconditioning conjugate gradient
-// * Concept NonLinearSolver<G,T,L> implements
-// *   linearize: G * T -> L
-// *   solve : L -> VectorValues
-// */
-//template<class GRAPH, class LINEAR, class VALUES>
-//class SubgraphSolver : public IterativeSolver {
-//
-//private:
-//  typedef typename VALUES::Key Key;
-//  typedef typename GRAPH::Pose Pose;
-//  typedef typename GRAPH::Constraint Constraint;
-//
-//  typedef boost::shared_ptr<SubgraphSolver> shared_ptr ;
-//  typedef boost::shared_ptr<Ordering> shared_ordering ;
-//  typedef boost::shared_ptr<GRAPH> shared_graph ;
-//  typedef boost::shared_ptr<LINEAR> shared_linear ;
-//  typedef boost::shared_ptr<VALUES> shared_values ;
-//  typedef boost::shared_ptr<SubgraphPreconditioner> shared_preconditioner ;
-//  typedef std::map<Index, Index> mapPairIndex ;
-//
-//  /* the ordering derived from the spanning tree */
-//  shared_ordering ordering_;
-//
-//  /* the indice of two vertices in the gaussian factor graph */
-//  mapPairIndex pairs_;
-//
-//  /* preconditioner */
-//  shared_preconditioner pc_;
-//
-//  /* flag for direct solver - either QR or LDL */
-//  bool useQR_;
-//
-//public:
-//
-//  SubgraphSolver(const GRAPH& G, const VALUES& theta0, const Parameters &parameters = Parameters(), bool useQR = false):
-//    IterativeSolver(parameters), useQR_(useQR) { initialize(G,theta0); }
-//
-//  SubgraphSolver(const LINEAR& GFG) {
-//    std::cout << "[SubgraphSolver] Unexpected usage.." << std::endl;
-//    throw std::runtime_error("SubgraphSolver: gaussian factor graph initialization not supported");
-//  }
-//
-//  SubgraphSolver(const shared_linear& GFG, const boost::shared_ptr<VariableIndex>& structure, bool useQR = false) {
-//    std::cout << "[SubgraphSolver] Unexpected usage.." << std::endl;
-//    throw std::runtime_error("SubgraphSolver: gaussian factor graph and variable index initialization not supported");
-//  }
-//
-//  SubgraphSolver(const SubgraphSolver& solver) :
-//    IterativeSolver(solver), ordering_(solver.ordering_), pairs_(solver.pairs_), pc_(solver.pc_), useQR_(solver.useQR_) {}
-//
-//  SubgraphSolver(shared_ordering ordering,
-//      mapPairIndex pairs,
-//      shared_preconditioner pc,
-//      sharedParameters parameters = boost::make_shared<Parameters>(),
-//      bool useQR = true) :
-//    IterativeSolver(parameters), ordering_(ordering), pairs_(pairs), pc_(pc), useQR_(useQR) {}
-//
-//  void replaceFactors(const typename LINEAR::shared_ptr &graph);
-//  VectorValues::shared_ptr optimize() ;
-//  shared_ordering ordering() const { return ordering_; }
-//
-//protected:
-//  void initialize(const GRAPH& G, const VALUES& theta0);
-//
-//private:
-//  SubgraphSolver():IterativeSolver(){}
-//};
-//
-//} // namespace gtsam
-//
-//#include <gtsam/linear/SubgraphSolver-inl.h>
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+#pragma once
+
+#include <gtsam/linear/ConjugateGradientSolver.h>
+#include <gtsam/linear/IterativeSolver.h>
+#include <gtsam/linear/SubgraphPreconditioner.h>
+
+#include <boost/tuple/tuple.hpp>
+
+namespace gtsam {
+
+/**
+ * A linear system solver using subgraph preconditioning conjugate gradient
+ */
+
+class SubgraphSolver : public IterativeSolver {
+
+public:
+
+  typedef ConjugateGradientParameters Parameters;
+
+  Parameters parameters_;
+
+  SubgraphPreconditioner::shared_ptr pc_; ///< preconditioner object
+
+public:
+
+  SubgraphSolver(const GaussianFactorGraph &gfg, const Parameters &parameters);
+
+  virtual ~SubgraphSolver() {}
+
+  virtual VectorValues::shared_ptr optimize () ;
+
+protected:
+
+  boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr>
+    splitGraph(const GaussianFactorGraph &gfg) ;
+};
+
+} // namespace gtsam
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
index 9a5897b41..8fbedc614 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -24,6 +24,7 @@
 #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/inference/EliminationTree.h>
+#include <gtsam/linear/SimpleSPCGSolver.h>

 using namespace std;

@@ -74,8 +75,15 @@ void LevenbergMarquardtOptimizer::iterate() {

     delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(dampedSystem)->eliminate(params_.getEliminationFunction()));
   } else if ( params_.isCG() ) {
-    IterativeOptimizationParameters::shared_ptr params(!params_.iterativeParams ? boost::make_shared<IterativeOptimizationParameters>() : params_.iterativeParams);
+
+    ConjugateGradientParameters::shared_ptr params (!params_.iterativeParams ?
+        boost::make_shared<ConjugateGradientParameters>() :
+        boost::dynamic_pointer_cast<ConjugateGradientParameters>(params_.iterativeParams));
+
+    if ( !params ) throw runtime_error("LMSolver: spcg parameter dynamic casting failed");
+
     SimpleSPCGSolver solver(dampedSystem, *params);
+    //SubgraphSolver solver(dampedSystem, *params);
     delta = *solver.optimize();
   } else {
diff --git a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
index b4a7c3c21..393e4bacc 100644
--- a/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
+++ b/gtsam/nonlinear/SuccessiveLinearizationOptimizer.h
@@ -19,7 +19,7 @@
 #pragma once

 #include <gtsam/nonlinear/NonlinearOptimizer.h>
-#include <gtsam/linear/IterativeOptimizationParameters.h>
+#include <gtsam/linear/IterativeSolver.h>

 namespace gtsam {
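End to end, an application selects this code path through the optimizer parameters (a sketch; `linearSolverType` and its enum value follow the GTSAM 2.x API as an assumption, while `iterativeParams` and `isCG()` appear in the patch itself):

    gtsam::LevenbergMarquardtParams lmParams;
    lmParams.linearSolverType = gtsam::SuccessiveLinearizationParams::CONJUGATE_GRADIENT; // assumed enum
    lmParams.iterativeParams = boost::make_shared<gtsam::ConjugateGradientParameters>();
    gtsam::LevenbergMarquardtOptimizer optimizer(graph, initial, lmParams);
    gtsam::Values result = optimizer.optimize();

The dynamic_pointer_cast in iterate() is what enforces that iterativeParams actually holds a ConjugateGradientParameters (or subclass) when the CG path is chosen.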
diff --git a/gtsam_unstable/linear/iterative-inl.h b/gtsam_unstable/linear/iterative-inl.h
index aa7eb076b..5641cb7ae 100644
--- a/gtsam_unstable/linear/iterative-inl.h
+++ b/gtsam_unstable/linear/iterative-inl.h
@@ -19,7 +19,7 @@
 #pragma once

 #include <gtsam_unstable/linear/iterative.h>
-#include <gtsam/linear/IterativeOptimizationParameters.h>
+#include <gtsam/linear/ConjugateGradientSolver.h>
 #include <boost/shared_ptr.hpp>

 namespace gtsam {
@@ -29,7 +29,7 @@ namespace gtsam {

   template<class S, class V, class E>
   struct CGState {

-    typedef IterativeOptimizationParameters Parameters;
+    typedef ConjugateGradientParameters Parameters;
     const Parameters &parameters_;

     int k;  ///< iteration
@@ -91,7 +91,7 @@

       // check for convergence
       double new_gamma = dot(g, g);

-      if (parameters_.verbosity() != IterativeOptimizationParameters::SILENT)
+      if (parameters_.verbosity() != ConjugateGradientParameters::SILENT)
         std::cout << "iteration " << k << ": alpha = " << alpha
                   << ", dotg = " << new_gamma << std::endl;

       if (new_gamma < threshold) return true;
@@ -121,18 +121,18 @@
   V conjugateGradients(
       const S& Ab, V x,
-      const IterativeOptimizationParameters &parameters,
+      const ConjugateGradientParameters &parameters,
       bool steepest = false) {

     CGState<S, V, E> state(Ab, x, parameters, steepest);

-    if (parameters.verbosity() != IterativeOptimizationParameters::SILENT)
+    if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
       std::cout << "CG: epsilon = " << parameters.epsilon()
                 << ", maxIterations = " << parameters.maxIterations()
                 << ", ||g0||^2 = " << state.gamma
                 << ", threshold = " << state.threshold << std::endl;

     if ( state.gamma < state.threshold ) {
-      if (parameters.verbosity() != IterativeOptimizationParameters::SILENT)
+      if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
         std::cout << "||g0||^2 < threshold, exiting immediately !" << std::endl;

       return x;
diff --git a/gtsam_unstable/linear/iterative.cpp b/gtsam_unstable/linear/iterative.cpp
index c7b670843..eb1d898dd 100644
--- a/gtsam_unstable/linear/iterative.cpp
+++ b/gtsam_unstable/linear/iterative.cpp
@@ -20,7 +20,7 @@
 #include <gtsam_unstable/linear/iterative-inl.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/JacobianFactor.h>
-#include <gtsam/linear/IterativeOptimizationParameters.h>
+#include <gtsam/linear/ConjugateGradientSolver.h>

 #include <boost/foreach.hpp>

@@ -38,31 +38,31 @@

   /* ************************************************************************* */
-  Vector steepestDescent(const System& Ab, const Vector& x, const IterativeOptimizationParameters & parameters) {
+  Vector steepestDescent(const System& Ab, const Vector& x, const ConjugateGradientParameters & parameters) {
     return conjugateGradients<System, Vector, Vector> (Ab, x, parameters, true);
   }

-  Vector conjugateGradientDescent(const System& Ab, const Vector& x, const IterativeOptimizationParameters & parameters) {
+  Vector conjugateGradientDescent(const System& Ab, const Vector& x, const ConjugateGradientParameters & parameters) {
     return conjugateGradients<System, Vector, Vector> (Ab, x, parameters);
   }

   /* ************************************************************************* */
-  Vector steepestDescent(const Matrix& A, const Vector& b, const Vector& x, const IterativeOptimizationParameters & parameters) {
+  Vector steepestDescent(const Matrix& A, const Vector& b, const Vector& x, const ConjugateGradientParameters & parameters) {
     System Ab(A, b);
     return conjugateGradients<System, Vector, Vector> (Ab, x, parameters, true);
   }

-  Vector conjugateGradientDescent(const Matrix& A, const Vector& b, const Vector& x, const IterativeOptimizationParameters & parameters) {
+  Vector conjugateGradientDescent(const Matrix& A, const Vector& b, const Vector& x, const ConjugateGradientParameters & parameters) {
     System Ab(A, b);
     return conjugateGradients<System, Vector, Vector> (Ab, x, parameters);
   }

   /* ************************************************************************* */
-  VectorValues steepestDescent(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const IterativeOptimizationParameters & parameters) {
+  VectorValues steepestDescent(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const ConjugateGradientParameters & parameters) {
    return conjugateGradients<FactorGraph<JacobianFactor>, VectorValues, Errors> (fg, x, parameters, true);
   }

-  VectorValues conjugateGradientDescent(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const IterativeOptimizationParameters & parameters) {
+  VectorValues conjugateGradientDescent(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const ConjugateGradientParameters & parameters) {
     return conjugateGradients<FactorGraph<JacobianFactor>, VectorValues, Errors> (fg, x, parameters);
   }
diff --git a/gtsam_unstable/linear/iterative.h b/gtsam_unstable/linear/iterative.h
index 2e6f77f10..f578391d0 100644
--- a/gtsam_unstable/linear/iterative.h
+++ b/gtsam_unstable/linear/iterative.h
@@ -1,6 +1,6 @@
 /* ----------------------------------------------------------------------------

- * GTSAM Copyright 2010, Georgia Tech Research Corporation, 
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
  * Atlanta, Georgia 30332-0415
  * All Rights Reserved
  * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
@@ -20,7 +20,7 @@

 #include <gtsam/base/Matrix.h>
 #include <gtsam/linear/VectorValues.h>
-#include <gtsam/linear/IterativeOptimizationParameters.h>
+#include <gtsam/linear/ConjugateGradientSolver.h>

 namespace gtsam {
@@ -105,7 +105,7 @@ namespace gtsam {
   Vector conjugateGradientDescent(
       const System& Ab,
       const Vector& x,
-      const IterativeOptimizationParameters & parameters);
+      const ConjugateGradientParameters & parameters);

   /** convenience calls using matrices, will create System class internally: */
@@ -116,7 +116,7 @@
       const Matrix& A,
       const Vector& b,
       const Vector& x,
-      const IterativeOptimizationParameters & parameters);
+      const ConjugateGradientParameters & parameters);

   /**
    * Method of conjugate gradients (CG), Matrix version
    */
@@ -125,7 +125,7 @@
       const Matrix& A,
       const Vector& b,
       const Vector& x,
-      const IterativeOptimizationParameters & parameters);
+      const ConjugateGradientParameters & parameters);

   class GaussianFactorGraph;
@@ -135,7 +135,7 @@
   VectorValues steepestDescent(
       const GaussianFactorGraph& fg,
       const VectorValues& x,
-      const IterativeOptimizationParameters & parameters);
+      const ConjugateGradientParameters & parameters);

   /**
    * Method of conjugate gradients (CG), Gaussian Factor Graph version
    */
@@ -143,7 +143,7 @@
   VectorValues conjugateGradientDescent(
       const GaussianFactorGraph& fg,
       const VectorValues& x,
-      const IterativeOptimizationParameters & parameters);
+      const ConjugateGradientParameters & parameters);

 } // namespace gtsam
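Finally, the matrix-level entry points keep working after the rename; a self-contained sketch with a made-up 2x2 least-squares system, assuming the Eigen-based gtsam Matrix/Vector typedefs:

    #include <gtsam_unstable/linear/iterative.h>

    int main() {
      gtsam::Matrix A = (gtsam::Matrix(2, 2) << 4.0, 1.0, 1.0, 3.0).finished();
      gtsam::Vector b = (gtsam::Vector(2) << 1.0, 2.0).finished();
      gtsam::Vector x0 = gtsam::Vector::Zero(2);  // starting point
      gtsam::ConjugateGradientParameters params;  // defaults: maxIter 500, eps 1e-3
      // Minimizes ||A*x - b||^2 via CG on the wrapped System(A, b)
      gtsam::Vector x = gtsam::conjugateGradientDescent(A, b, x0, params);
      return 0;
    }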