diff --git a/examples/SFMExample_SmartFactorPCG.cpp b/examples/SFMExample_SmartFactorPCG.cpp
index a913d32ad..b64fcd048 100644
--- a/examples/SFMExample_SmartFactorPCG.cpp
+++ b/examples/SFMExample_SmartFactorPCG.cpp
@@ -94,11 +94,10 @@ int main(int argc, char* argv[]) {
   parameters.maxIterations = 500;
 
   PCGSolverParameters::shared_ptr pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner = std::make_shared<BlockJacobiPreconditionerParameters>();
   // Following is crucial:
-  pcg->setEpsilon_abs(1e-10);
-  pcg->setEpsilon_rel(1e-10);
+  pcg->epsilon_abs = 1e-10;
+  pcg->epsilon_rel = 1e-10;
 
   parameters.iterativeParams = pcg;
   LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
diff --git a/gtsam/linear/ConjugateGradientSolver.cpp b/gtsam/linear/ConjugateGradientSolver.cpp
index 006e7d67d..b2725ea1e 100644
--- a/gtsam/linear/ConjugateGradientSolver.cpp
+++ b/gtsam/linear/ConjugateGradientSolver.cpp
@@ -26,13 +26,13 @@ namespace gtsam {
 
 /*****************************************************************************/
 void ConjugateGradientParameters::print(ostream &os) const {
-  Base::print(os);
-  cout << "ConjugateGradientParameters" << endl
-       << "minIter: " << minIterations_ << endl
-       << "maxIter: " << maxIterations_ << endl
-       << "resetIter: " << reset_ << endl
-       << "eps_rel: " << epsilon_rel_ << endl
-       << "eps_abs: " << epsilon_abs_ << endl;
+  Base::print(os);
+  cout << "ConjugateGradientParameters" << endl
+       << "minIter: " << minIterations << endl
+       << "maxIter: " << maxIterations << endl
+       << "resetIter: " << reset << endl
+       << "eps_rel: " << epsilon_rel << endl
+       << "eps_abs: " << epsilon_abs << endl;
 }
 
 /*****************************************************************************/
diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index b09b1a352..46d49ca4f 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -24,59 +24,66 @@ namespace gtsam {
 
 /**
- * parameters for the conjugate gradient method
+ * Parameters for the Conjugate Gradient method
  */
-class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
-
-public:
+struct GTSAM_EXPORT ConjugateGradientParameters
+    : public IterativeOptimizationParameters {
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;
 
-  size_t minIterations_; ///< minimum number of cg iterations
-  size_t maxIterations_; ///< maximum number of cg iterations
-  size_t reset_; ///< number of iterations before reset
-  double epsilon_rel_; ///< threshold for relative error decrease
-  double epsilon_abs_; ///< threshold for absolute error decrease
+  size_t minIterations;  ///< minimum number of cg iterations
+  size_t maxIterations;  ///< maximum number of cg iterations
+  size_t reset;          ///< number of iterations before reset
+  double epsilon_rel;    ///< threshold for relative error decrease
+  double epsilon_abs;    ///< threshold for absolute error decrease
 
   /* Matrix Operation Kernel */
   enum BLASKernel {
-    GTSAM = 0, ///< Jacobian Factor Graph of GTSAM
-  } blas_kernel_ ;
+    GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
+  } blas_kernel;
 
   ConjugateGradientParameters()
-  : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3),
-    epsilon_abs_(1e-3), blas_kernel_(GTSAM) {}
+      : minIterations(1),
+        maxIterations(500),
+        reset(501),
+        epsilon_rel(1e-3),
+        epsilon_abs(1e-3),
+        blas_kernel(GTSAM) {}
 
-  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-      double epsilon_rel, double epsilon_abs, BLASKernel blas)
-  : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}
+  ConjugateGradientParameters(size_t minIterations, size_t maxIterations,
+                              size_t reset, double epsilon_rel,
+                              double epsilon_abs, BLASKernel blas)
+      : minIterations(minIterations),
+        maxIterations(maxIterations),
+        reset(reset),
+        epsilon_rel(epsilon_rel),
+        epsilon_abs(epsilon_abs),
+        blas_kernel(blas) {}
 
   ConjugateGradientParameters(const ConjugateGradientParameters &p)
-  : Base(p), minIterations_(p.minIterations_), maxIterations_(p.maxIterations_), reset_(p.reset_),
-    epsilon_rel_(p.epsilon_rel_), epsilon_abs_(p.epsilon_abs_), blas_kernel_(GTSAM) {}
+      : Base(p),
+        minIterations(p.minIterations),
+        maxIterations(p.maxIterations),
+        reset(p.reset),
+        epsilon_rel(p.epsilon_rel),
+        epsilon_abs(p.epsilon_abs),
+        blas_kernel(GTSAM) {}
 
-  /* general interface */
-  inline size_t minIterations() const { return minIterations_; }
-  inline size_t maxIterations() const { return maxIterations_; }
-  inline size_t reset() const { return reset_; }
-  inline double epsilon() const { return epsilon_rel_; }
-  inline double epsilon_rel() const { return epsilon_rel_; }
-  inline double epsilon_abs() const { return epsilon_abs_; }
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43
+  inline size_t getMinIterations() const { return minIterations; }
+  inline size_t getMaxIterations() const { return maxIterations; }
+  inline size_t getReset() const { return reset; }
+  inline double getEpsilon() const { return epsilon_rel; }
+  inline double getEpsilon_rel() const { return epsilon_rel; }
+  inline double getEpsilon_abs() const { return epsilon_abs; }
 
-  inline size_t getMinIterations() const { return minIterations_; }
-  inline size_t getMaxIterations() const { return maxIterations_; }
-  inline size_t getReset() const { return reset_; }
-  inline double getEpsilon() const { return epsilon_rel_; }
-  inline double getEpsilon_rel() const { return epsilon_rel_; }
-  inline double getEpsilon_abs() const { return epsilon_abs_; }
-
-  inline void setMinIterations(size_t value) { minIterations_ = value; }
-  inline void setMaxIterations(size_t value) { maxIterations_ = value; }
-  inline void setReset(size_t value) { reset_ = value; }
-  inline void setEpsilon(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_rel(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_abs(double value) { epsilon_abs_ = value; }
+  inline void setMinIterations(size_t value) { minIterations = value; }
+  inline void setMaxIterations(size_t value) { maxIterations = value; }
+  inline void setReset(size_t value) { reset = value; }
+  inline void setEpsilon(double value) { epsilon_rel = value; }
+  inline void setEpsilon_rel(double value) { epsilon_rel = value; }
+  inline void setEpsilon_abs(double value) { epsilon_abs = value; }
+#endif
 
   void print() const { Base::print(); }
 
@@ -109,18 +116,19 @@ V preconditionedConjugateGradient(const S &system, const V &initial,
   double currentGamma = system.dot(residual, residual), prevGamma, alpha, beta;
 
-  const size_t iMaxIterations = parameters.maxIterations(),
-               iMinIterations = parameters.minIterations(),
-               iReset = parameters.reset() ;
-  const double threshold = std::max(parameters.epsilon_abs(),
-      parameters.epsilon() * parameters.epsilon() * currentGamma);
+  const size_t iMaxIterations = parameters.maxIterations,
+               iMinIterations = parameters.minIterations,
+               iReset = parameters.reset;
+  const double threshold =
+      std::max(parameters.epsilon_abs,
+               parameters.epsilon_rel * parameters.epsilon_rel * currentGamma);
 
-  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY )
-    std::cout << "[PCG] epsilon = " << parameters.epsilon()
-              << ", max = " << parameters.maxIterations()
-              << ", reset = " << parameters.reset()
-              << ", ||r0||^2 = " << currentGamma
-              << ", threshold = " << threshold << std::endl;
+  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY)
+    std::cout << "[PCG] epsilon = " << parameters.epsilon_rel
+              << ", max = " << parameters.maxIterations
+              << ", reset = " << parameters.reset
+              << ", ||r0||^2 = " << currentGamma
+              << ", threshold = " << threshold << std::endl;
 
   size_t k;
   for ( k = 1 ; k <= iMaxIterations && (currentGamma > threshold || k <= iMinIterations) ; k++ ) {
diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp
index 5ec6fa67b..c0510961f 100644
--- a/gtsam/linear/PCGSolver.cpp
+++ b/gtsam/linear/PCGSolver.cpp
@@ -34,17 +34,13 @@ namespace gtsam {
 void PCGSolverParameters::print(ostream &os) const {
   Base::print(os);
   os << "PCGSolverParameters:" << endl;
-  preconditioner_->print(os);
+  preconditioner->print(os);
 }
 
 /*****************************************************************************/
 PCGSolver::PCGSolver(const PCGSolverParameters &p) {
   parameters_ = p;
-  preconditioner_ = createPreconditioner(p.preconditioner_);
-}
-
-void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner) {
-  preconditioner_ = preconditioner;
+  preconditioner_ = createPreconditioner(p.preconditioner);
 }
 
 void PCGSolverParameters::print(const std::string &s) const {
diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index 5ed2c7c6d..17cc2d3db 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -31,29 +31,22 @@ class VectorValues;
 struct PreconditionerParameters;
 
 /**
- * Parameters for PCG
+ * Parameters for Preconditioned Conjugate Gradient solver.
  */
-struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
-public:
+struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters {
   typedef ConjugateGradientParameters Base;
   typedef std::shared_ptr<PCGSolverParameters> shared_ptr;
 
-  PCGSolverParameters() {
-  }
+  std::shared_ptr<PreconditionerParameters> preconditioner;
+
+  PCGSolverParameters() {}
+
+  PCGSolverParameters(
+      const std::shared_ptr<PreconditionerParameters> &preconditioner)
+      : preconditioner(preconditioner) {}
 
   void print(std::ostream &os) const override;
-
-  /* interface to preconditioner parameters */
-  inline const PreconditionerParameters& preconditioner() const {
-    return *preconditioner_;
-  }
-
-  // needed for python wrapper
   void print(const std::string &s) const;
-
-  std::shared_ptr<PreconditionerParameters> preconditioner_;
-
-  void setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner);
 };
 
 /**
@@ -87,16 +80,16 @@ public:
  * System class needed for calling preconditionedConjugateGradient
  */
 class GTSAM_EXPORT GaussianFactorGraphSystem {
-public:
-
-  GaussianFactorGraphSystem(const GaussianFactorGraph &gfg,
-      const Preconditioner &preconditioner, const KeyInfo &info,
-      const std::map<Key, Vector> &lambda);
-
   const GaussianFactorGraph &gfg_;
   const Preconditioner &preconditioner_;
-  const KeyInfo &keyInfo_;
-  const std::map<Key, Vector> &lambda_;
+  KeyInfo keyInfo_;
+  std::map<Key, Vector> lambda_;
+
+ public:
+  GaussianFactorGraphSystem(const GaussianFactorGraph &gfg,
+                            const Preconditioner &preconditioner,
+                            const KeyInfo &info,
+                            const std::map<Key, Vector> &lambda);
 
   void residual(const Vector &x, Vector &r) const;
   void multiply(const Vector &x, Vector& y) const;
diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
index c96d643b2..a8185b544 100644
--- a/gtsam/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -49,10 +49,12 @@ namespace gtsam {
 
       // init gamma and calculate threshold
       gamma = dot(g,g);
-      threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
+      threshold =
+          std::max(parameters_.epsilon_abs,
+                   parameters_.epsilon_rel * parameters_.epsilon_rel * gamma);
 
       // Allocate and calculate A*d for first iteration
-      if (gamma > parameters_.epsilon_abs()) Ad = Ab * d;
+      if (gamma > parameters_.epsilon_abs) Ad = Ab * d;
     }
 
     /* ************************************************************************* */
@@ -79,13 +81,13 @@ namespace gtsam {
 
     // take a step, return true if converged
     bool step(const S& Ab, V& x) {
-      if ((++k) >= ((int)parameters_.maxIterations())) return true;
+      if ((++k) >= ((int)parameters_.maxIterations)) return true;
 
       //---------------------------------->
       double alpha = takeOptimalStep(x);
 
       // update gradient (or re-calculate at reset time)
-      if (k % parameters_.reset() == 0) g = Ab.gradient(x);
+      if (k % parameters_.reset == 0) g = Ab.gradient(x);
       // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
       else Ab.transposeMultiplyAdd(alpha, Ad, g);
 
@@ -126,11 +128,10 @@ namespace gtsam {
     CGState<S, V, E> state(Ab, x, parameters, steepest);
 
     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
-      std::cout << "CG: epsilon = " << parameters.epsilon()
-                << ", maxIterations = " << parameters.maxIterations()
+      std::cout << "CG: epsilon = " << parameters.epsilon_rel
+                << ", maxIterations = " << parameters.maxIterations
                 << ", ||g0||^2 = " << state.gamma
-                << ", threshold = " << state.threshold
-                << std::endl;
+                << ", threshold = " << state.threshold << std::endl;
 
     if ( state.gamma < state.threshold ) {
       if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i
index af6c2ee22..eecc0192c 100644
--- a/gtsam/linear/linear.i
+++ b/gtsam/linear/linear.i
@@ -710,17 +710,11 @@ virtual class IterativeOptimizationParameters {
 #include <gtsam/linear/ConjugateGradientSolver.h>
 virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   ConjugateGradientParameters();
-  int getMinIterations() const ;
-  int getMaxIterations() const ;
-  int getReset() const;
-  double getEpsilon_rel() const;
-  double getEpsilon_abs() const;
-
-  void setMinIterations(int value);
-  void setMaxIterations(int value);
-  void setReset(int value);
-  void setEpsilon_rel(double value);
-  void setEpsilon_abs(double value);
+  int minIterations;
+  int maxIterations;
+  int reset;
+  double epsilon_rel;
+  double epsilon_abs;
 };
 
 #include <gtsam/linear/Preconditioner.h>
@@ -739,8 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParamet
 #include <gtsam/linear/PCGSolver.h>
 virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
   PCGSolverParameters();
+  PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner);
   void print(string s = "");
-  void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
+
+  gtsam::PreconditionerParameters* preconditioner;
 };
 
 #include
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 0b1a43815..0ab56f6d1 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -16,11 +16,11 @@
  * @date Jun 11, 2012
  */
 
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
 
 #include
 
@@ -34,29 +34,35 @@ typedef internal::NonlinearOptimizerState State;
  * @param values a linearization point
  * Can be moved to NonlinearFactorGraph.h if desired
  */
-static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg,
-    const Values &values) {
+static VectorValues gradientInPlace(const NonlinearFactorGraph& nfg,
+                                    const Values& values) {
   // Linearize graph
   GaussianFactorGraph::shared_ptr linear = nfg.linearize(values);
   return linear->gradientAtZero();
 }
 
 NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
-    const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params)
-    : Base(graph, std::unique_ptr<State>(new State(initialValues, graph.error(initialValues)))),
-      params_(params) {}
+    const NonlinearFactorGraph& graph, const Values& initialValues,
+    const Parameters& params)
+    : Base(graph, std::unique_ptr<State>(
+                      new State(initialValues, graph.error(initialValues)))),
+      params_(params) {}
 
-double NonlinearConjugateGradientOptimizer::System::error(const State& state) const {
+double NonlinearConjugateGradientOptimizer::System::error(
+    const State& state) const {
   return graph_.error(state);
 }
 
-NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(
-    const State &state) const {
+NonlinearConjugateGradientOptimizer::System::Gradient
+NonlinearConjugateGradientOptimizer::System::gradient(
+    const State& state) const {
   return gradientInPlace(graph_, state);
 }
 
-NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(
-    const State &current, const double alpha, const Gradient &g) const {
+NonlinearConjugateGradientOptimizer::System::State
+NonlinearConjugateGradientOptimizer::System::advance(const State& current,
+                                                     const double alpha,
+                                                     const Gradient& g) const {
   Gradient step = g;
   step *= alpha;
   return current.retract(step);
@@ -65,7 +71,8 @@ NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOpt
 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
   const auto [newValues, dummy] = nonlinearConjugateGradient(
       System(graph_), state_->values, params_, true /* single iteration */);
-  state_.reset(new State(newValues, graph_.error(newValues), state_->iterations + 1));
+  state_.reset(
+      new State(newValues, graph_.error(newValues), state_->iterations + 1));
 
   // NOTE(frank): We don't linearize this system, so we must return null here.
   return nullptr;
@@ -76,9 +83,9 @@ const Values& NonlinearConjugateGradientOptimizer::optimize() {
   System system(graph_);
   const auto [newValues, iterations] =
       nonlinearConjugateGradient(system, state_->values, params_, false);
-  state_.reset(new State(std::move(newValues), graph_.error(newValues), iterations));
+  state_.reset(
+      new State(std::move(newValues), graph_.error(newValues), iterations));
   return state_->values;
 }
 
 } /* namespace gtsam */
-
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 3dd6c7e05..369291fae 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -24,89 +24,80 @@ namespace gtsam {
 
 /** An implementation of the nonlinear CG method using the template below */
-class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer {
-
+class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
+    : public NonlinearOptimizer {
   /* a class for the nonlinearConjugateGradient template */
   class System {
-  public:
+   public:
     typedef Values State;
     typedef VectorValues Gradient;
    typedef NonlinearOptimizerParams Parameters;
 
-  protected:
+   protected:
    const NonlinearFactorGraph &graph_;
 
-  public:
-    System(const NonlinearFactorGraph &graph) :
-        graph_(graph) {
-    }
+   public:
+    System(const NonlinearFactorGraph &graph) : graph_(graph) {}
    double error(const State &state) const;
    Gradient gradient(const State &state) const;
    State advance(const State &current, const double alpha,
-        const Gradient &g) const;
+                  const Gradient &g) const;
   };
 
-public:
-
+ public:
  typedef NonlinearOptimizer Base;
  typedef NonlinearOptimizerParams Parameters;
  typedef std::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;
 
-protected:
+ protected:
  Parameters params_;
 
-  const NonlinearOptimizerParams& _params() const override {
-    return params_;
-  }
-
-public:
+  const NonlinearOptimizerParams &_params() const override { return params_; }
 
+ public:
  /// Constructor
-  NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph,
-      const Values& initialValues, const Parameters& params = Parameters());
+  NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph &graph,
+                                      const Values &initialValues,
+                                      const Parameters &params = Parameters());
 
  /// Destructor
-  ~NonlinearConjugateGradientOptimizer() override {
-  }
+  ~NonlinearConjugateGradientOptimizer() override {}
 
-  /** 
-   * Perform a single iteration, returning GaussianFactorGraph corresponding to 
+  /**
+   * Perform a single iteration, returning GaussianFactorGraph corresponding to
   * the linearized factor graph.
   */
  GaussianFactorGraph::shared_ptr iterate() override;
 
-  /** 
-   * Optimize for the maximum-likelihood estimate, returning a the optimized 
+  /**
+   * Optimize for the maximum-likelihood estimate, returning a the optimized
   * variable assignments.
   */
-  const Values& optimize() override;
+  const Values &optimize() override;
 };
 
 /** Implement the golden-section line search algorithm */
-template<class S, class V, class W>
+template <class S, class V, class W>
 double lineSearch(const S &system, const V currentValues, const W &gradient) {
-
   /* normalize it such that it becomes a unit vector */
   const double g = gradient.norm();
 
-  // perform the golden section search algorithm to decide the the optimal step size
-  // detail refer to http://en.wikipedia.org/wiki/Golden_section_search
-  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, tau =
-      1e-5;
-  double minStep = -1.0 / g, maxStep = 0, newStep = minStep
-      + (maxStep - minStep) / (phi + 1.0);
+  // perform the golden section search algorithm to decide the the optimal step
+  // size detail refer to http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+               tau = 1e-5;
+  double minStep = -1.0 / g, maxStep = 0,
+         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
   V newValues = system.advance(currentValues, newStep, gradient);
   double newError = system.error(newValues);
 
   while (true) {
     const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-    const double testStep =
-        flag ? newStep + resphi * (maxStep - newStep) :
-            newStep - resphi * (newStep - minStep);
+    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                 : newStep - resphi * (newStep - minStep);
 
-    if ((maxStep - minStep)
-        < tau * (std::abs(testStep) + std::abs(newStep))) {
+    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
       return 0.5 * (minStep + maxStep);
     }
 
@@ -135,19 +126,21 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
 }
 
 /**
- * Implement the nonlinear conjugate gradient method using the Polak-Ribiere formula suggested in
+ * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
+ * formula suggested in
 * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
 *
- * The S (system) class requires three member functions: error(state), gradient(state) and
- * advance(state, step-size, direction). The V class denotes the state or the solution.
+ * The S (system) class requires three member functions: error(state),
+ * gradient(state) and advance(state, step-size, direction). The V class denotes
+ * the state or the solution.
 *
- * The last parameter is a switch between gradient-descent and conjugate gradient
+ * The last parameter is a switch between gradient-descent and conjugate
+ * gradient
 */
-template<class S, class V>
-std::tuple<V, int> nonlinearConjugateGradient(const S &system,
-    const V &initial, const NonlinearOptimizerParams &params,
+template <class S, class V>
+std::tuple<V, int> nonlinearConjugateGradient(
+    const S &system, const V &initial, const NonlinearOptimizerParams &params,
     const bool singleIteration, const bool gradientDescent = false) {
-
   // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
 
   size_t iteration = 0;
@@ -157,14 +150,14 @@ std::tuple<V, int> nonlinearConjugateGradient(const S &system,
   if (currentError <= params.errorTol) {
     if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
       std::cout << "Exiting, as error = " << currentError << " < "
-          << params.errorTol << std::endl;
+                << params.errorTol << std::endl;
     }
     return {initial, iteration};
   }
 
   V currentValues = initial;
   typename S::Gradient currentGradient = system.gradient(currentValues),
-      prevGradient, direction = currentGradient;
+                       prevGradient, direction = currentGradient;
 
   /* do one step of gradient descent */
   V prevValues = currentValues;
@@ -185,9 +178,9 @@ std::tuple<V, int> nonlinearConjugateGradient(const S &system,
       prevGradient = currentGradient;
       currentGradient = system.gradient(currentValues);
       // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
-      const double beta = std::max(0.0,
-          currentGradient.dot(currentGradient - prevGradient)
-              / prevGradient.dot(prevGradient));
+      const double beta =
+          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                            prevGradient.dot(prevGradient));
       direction = currentGradient + (beta * direction);
     }
 
@@ -205,20 +198,21 @@ std::tuple<V, int> nonlinearConjugateGradient(const S &system,
 
     // Maybe show output
     if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl;
-  } while (++iteration < params.maxIterations && !singleIteration
-      && !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-          params.errorTol, prevError, currentError, params.verbosity));
+      std::cout << "iteration: " << iteration
+                << ", currentError: " << currentError << std::endl;
+  } while (++iteration < params.maxIterations && !singleIteration &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                             params.errorTol, prevError, currentError,
+                             params.verbosity));
 
   // Printing if verbose
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR
-      && iteration >= params.maxIterations)
-    std::cout
-        << "nonlinearConjugateGradient: Terminating because reached maximum iterations"
-        << std::endl;
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      iteration >= params.maxIterations)
+    std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                 "maximum iterations"
+              << std::endl;
 
   return {currentValues, iteration};
 }
 
-} // \ namespace gtsam
-
+}  // namespace gtsam
diff --git a/tests/testGradientDescentOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp
similarity index 64%
rename from tests/testGradientDescentOptimizer.cpp
rename to gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp
index d7ca70459..36673c7a0 100644
--- a/tests/testGradientDescentOptimizer.cpp
+++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp
@@ -1,56 +1,52 @@
 /**
- * @file NonlinearConjugateGradientOptimizer.cpp
- * @brief Test simple CG optimizer
+ * @file testNonlinearConjugateGradientOptimizer.cpp
+ * @brief Test nonlinear CG optimizer
  * @author Yong-Dian Jian
+ * @author Varun Agrawal
  * @date June 11, 2012
  */
 
-/**
- * @file testGradientDescentOptimizer.cpp
- * @brief Small test of NonlinearConjugateGradientOptimizer
- * @author Yong-Dian Jian
- * @date Jun 11, 2012
- */
-
-#include
+#include
+#include
 #include
 #include
 #include
-#include
-
-#include
-
+#include
 
 using namespace std;
 using namespace gtsam;
 
 // Generate a small PoseSLAM problem
 std::tuple<NonlinearFactorGraph, Values> generateProblem() {
-
   // 1. Create graph container and add factors to it
   NonlinearFactorGraph graph;
 
   // 2a. Add Gaussian prior
-  Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
-  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
-      Vector3(0.3, 0.3, 0.1));
+  Pose2 priorMean(0.0, 0.0, 0.0);  // prior at origin
+  SharedDiagonal priorNoise =
+      noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
   graph.addPrior(1, priorMean, priorNoise);
 
   // 2b. Add odometry factors
-  SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas(
-      Vector3(0.2, 0.2, 0.1));
-  graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise);
-  graph.emplace_shared<BetweenFactor<Pose2>>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
-  graph.emplace_shared<BetweenFactor<Pose2>>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
-  graph.emplace_shared<BetweenFactor<Pose2>>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
+  SharedDiagonal odometryNoise =
+      noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
+  graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(2.0, 0.0, 0.0),
+                                             odometryNoise);
+  graph.emplace_shared<BetweenFactor<Pose2>>(2, 3, Pose2(2.0, 0.0, M_PI_2),
+                                             odometryNoise);
+  graph.emplace_shared<BetweenFactor<Pose2>>(3, 4, Pose2(2.0, 0.0, M_PI_2),
+                                             odometryNoise);
+  graph.emplace_shared<BetweenFactor<Pose2>>(4, 5, Pose2(2.0, 0.0, M_PI_2),
+                                             odometryNoise);
 
   // 2c. Add pose constraint
-  SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas(
-      Vector3(0.2, 0.2, 0.1));
+  SharedDiagonal constraintUncertainty =
+      noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
   graph.emplace_shared<BetweenFactor<Pose2>>(5, 2, Pose2(2.0, 0.0, M_PI_2),
-      constraintUncertainty);
+                                             constraintUncertainty);
 
-  // 3. Create the data structure to hold the initialEstimate estimate to the solution
+  // 3. Create the data structure to hold the initialEstimate estimate to the
+  // solution
   Values initialEstimate;
   Pose2 x1(0.5, 0.0, 0.2);
   initialEstimate.insert(1, x1);
@@ -68,16 +64,17 @@ std::tuple<NonlinearFactorGraph, Values> generateProblem() {
 
 /* ************************************************************************* */
 TEST(NonlinearConjugateGradientOptimizer, Optimize) {
-const auto [graph, initialEstimate] = generateProblem();
-// cout << "initial error = " << graph.error(initialEstimate) << endl;
+  const auto [graph, initialEstimate] = generateProblem();
+  // cout << "initial error = " << graph.error(initialEstimate) << endl;
 
   NonlinearOptimizerParams param;
-  param.maxIterations = 500; /* requires a larger number of iterations to converge */
+  param.maxIterations =
+      500; /* requires a larger number of iterations to converge */
   param.verbosity = NonlinearOptimizerParams::SILENT;
 
   NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
   Values result = optimizer.optimize();
-// cout << "cg final error = " << graph.error(result) << endl;
+  // cout << "cg final error = " << graph.error(result) << endl;
 
   EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
 }
diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp
index 7c8b07f37..830f1c719 100644
--- a/gtsam/sfm/ShonanAveraging.cpp
+++ b/gtsam/sfm/ShonanAveraging.cpp
@@ -67,20 +67,15 @@ ShonanAveragingParameters::ShonanAveragingParameters(
   builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
   builderParameters.augmentationFactor = 0.0;
 
-  auto pcg = std::make_shared<PCGSolverParameters>();
-
   // Choose optimization method
   if (method == "SUBGRAPH") {
     lm.iterativeParams =
         std::make_shared<SubgraphSolverParameters>(builderParameters);
   } else if (method == "SGPC") {
-    pcg->preconditioner_ =
-        std::make_shared<SubgraphPreconditionerParameters>(builderParameters);
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
+        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
   } else if (method == "JACOBI") {
-    pcg->preconditioner_ =
-        std::make_shared<BlockJacobiPreconditionerParameters>();
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
+        std::make_shared<BlockJacobiPreconditionerParameters>());
   } else if (method == "QR") {
     lm.setLinearSolverType("MULTIFRONTAL_QR");
   } else if (method == "CHOLESKY") {
diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py
index 1b4209509..2b276425f 100644
--- a/python/gtsam/tests/test_NonlinearOptimizer.py
+++ b/python/gtsam/tests/test_NonlinearOptimizer.py
@@ -69,7 +69,7 @@ class TestScenario(GtsamTestCase):
         lmParams = LevenbergMarquardtParams.CeresDefaults()
         lmParams.setLinearSolverType("ITERATIVE")
         cgParams = PCGSolverParameters()
-        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
+        cgParams.preconditioner = DummyPreconditionerParameters()
         lmParams.setIterativeParams(cgParams)
         actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
         self.assertAlmostEqual(0, self.fg.error(actual))
diff --git a/tests/testIterative.cpp b/tests/testIterative.cpp
index 6d9918b76..4df598f80 100644
--- a/tests/testIterative.cpp
+++ b/tests/testIterative.cpp
@@ -95,9 +95,9 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint )
   VectorValues zeros = config.zeroVectors();
 
   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);
 
   VectorValues expected;
@@ -122,9 +122,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
   VectorValues zeros = config.zeroVectors();
 
   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);
 
   VectorValues expected;
diff --git a/tests/testPCGSolver.cpp b/tests/testPCGSolver.cpp
index bc9f9e608..039864886 100644
--- a/tests/testPCGSolver.cpp
+++ b/tests/testPCGSolver.cpp
@@ -125,8 +125,8 @@ TEST( GaussianFactorGraphSystem, multiply_getb)
 TEST(PCGSolver, dummy) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<DummyPreconditionerParameters>());
   params.iterativeParams = pcg;
 
   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -145,9 +145,8 @@ TEST(PCGSolver, dummy) {
 TEST(PCGSolver, blockjacobi) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   params.iterativeParams = pcg;
 
   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -166,8 +165,8 @@ TEST(PCGSolver, blockjacobi) {
 TEST(PCGSolver, subgraph) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<SubgraphPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<SubgraphPreconditionerParameters>());
   params.iterativeParams = pcg;
 
   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp
index ecdf36322..967584b7d 100644
--- a/tests/testPreconditioner.cpp
+++ b/tests/testPreconditioner.cpp
@@ -54,21 +54,23 @@ TEST( PCGsolver, verySimpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");
 
   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::DummyPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7));
   //deltaPCGDummy.print("PCG Dummy");
 
   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   // It takes more than 1000 iterations for this test
-  pcg->setMaxIterations(1500);
+  pcg->maxIterations = 1500;
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5));
 
@@ -105,19 +107,21 @@ TEST(PCGSolver, simpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");
 
   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::DummyPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
   //deltaPCGDummy.print("PCG Dummy");
 
   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
   //deltaPCGJacobi.print("PCG Jacobi");
diff --git a/tests/testSubgraphSolver.cpp b/tests/testSubgraphSolver.cpp
index 026f3c919..e31ce23b5 100644
--- a/tests/testSubgraphSolver.cpp
+++ b/tests/testSubgraphSolver.cpp
@@ -48,7 +48,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) {
 TEST( SubgraphSolver, Parameters )
 {
   LONGS_EQUAL(SubgraphSolverParameters::SILENT, kParameters.verbosity());
-  LONGS_EQUAL(500, kParameters.maxIterations());
+  LONGS_EQUAL(500, kParameters.maxIterations);
 }
 
 /* ************************************************************************* */
diff --git a/timing/timeShonanFactor.cpp b/timing/timeShonanFactor.cpp
index fc97d01a8..3cdb596cd 100644
--- a/timing/timeShonanFactor.cpp
+++ b/timing/timeShonanFactor.cpp
@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
   // params.setVerbosityLM("SUMMARY");
   // params.linearSolverType = LevenbergMarquardtParams::Iterative;
   // auto pcg = std::make_shared<PCGSolverParameters>();
-  // pcg->preconditioner_ =
+  // pcg->preconditioner =
   //     std::make_shared();
   //     std::make_shared();
   // params.iterativeParams = pcg;
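
Reviewer note (illustrative, not part of the diff): the short sketch below shows how calling code would migrate from the removed set*/get* accessors to the public fields and the new PCGSolverParameters constructor introduced above. The header paths, the helper function name configureIterativeSolver, and the choice of BlockJacobiPreconditionerParameters are assumptions made for the example only; substitute whatever preconditioner your application actually uses.

// Minimal migration sketch under the assumptions stated above.
#include <memory>
#include <gtsam/linear/PCGSolver.h>        // PCGSolverParameters
#include <gtsam/linear/Preconditioner.h>   // BlockJacobiPreconditionerParameters
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>

void configureIterativeSolver(gtsam::LevenbergMarquardtParams& params) {
  // New style: pass the preconditioner to the constructor and assign the
  // public fields directly (the old setters are kept only behind
  // GTSAM_ALLOW_DEPRECATED_SINCE_V43).
  auto pcg = std::make_shared<gtsam::PCGSolverParameters>(
      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>());
  pcg->maxIterations = 500;  // was pcg->setMaxIterations(500);
  pcg->epsilon_rel = 1e-10;  // was pcg->setEpsilon_rel(1e-10);
  pcg->epsilon_abs = 1e-10;  // was pcg->setEpsilon_abs(1e-10);

  params.linearSolverType = gtsam::LevenbergMarquardtParams::Iterative;
  params.iterativeParams = pcg;
}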