diff --git a/examples/SFMExample_SmartFactorPCG.cpp b/examples/SFMExample_SmartFactorPCG.cpp
index a913d32ad..b64fcd048 100644
--- a/examples/SFMExample_SmartFactorPCG.cpp
+++ b/examples/SFMExample_SmartFactorPCG.cpp
@@ -94,11 +94,10 @@ int main(int argc, char* argv[]) {
   parameters.maxIterations = 500;
 
   PCGSolverParameters::shared_ptr pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared();
+  pcg->preconditioner = std::make_shared();
   // Following is crucial:
-  pcg->setEpsilon_abs(1e-10);
-  pcg->setEpsilon_rel(1e-10);
+  pcg->epsilon_abs = 1e-10;
+  pcg->epsilon_rel = 1e-10;
 
   parameters.iterativeParams = pcg;
   LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
diff --git a/gtsam/linear/ConjugateGradientSolver.cpp b/gtsam/linear/ConjugateGradientSolver.cpp
index 006e7d67d..b2725ea1e 100644
--- a/gtsam/linear/ConjugateGradientSolver.cpp
+++ b/gtsam/linear/ConjugateGradientSolver.cpp
@@ -26,13 +26,13 @@ namespace gtsam {
 
 /*****************************************************************************/
 void ConjugateGradientParameters::print(ostream &os) const {
-    Base::print(os);
-    cout << "ConjugateGradientParameters" << endl
-         << "minIter: " << minIterations_ << endl
-         << "maxIter: " << maxIterations_ << endl
-         << "resetIter: " << reset_ << endl
-         << "eps_rel: " << epsilon_rel_ << endl
-         << "eps_abs: " << epsilon_abs_ << endl;
+  Base::print(os);
+  cout << "ConjugateGradientParameters" << endl
+       << "minIter: " << minIterations << endl
+       << "maxIter: " << maxIterations << endl
+       << "resetIter: " << reset << endl
+       << "eps_rel: " << epsilon_rel << endl
+       << "eps_abs: " << epsilon_abs << endl;
 }
 
 /*****************************************************************************/
diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index ccdab45da..46d49ca4f 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -26,59 +26,64 @@ namespace gtsam {
 /**
  * Parameters for the Conjugate Gradient method
  */
-class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
-
- public:
+struct GTSAM_EXPORT ConjugateGradientParameters
+    : public IterativeOptimizationParameters {
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;
 
- protected:
-  size_t minIterations_;  ///< minimum number of cg iterations
-  size_t maxIterations_;  ///< maximum number of cg iterations
-  size_t reset_;          ///< number of iterations before reset
-  double epsilon_rel_;    ///< threshold for relative error decrease
-  double epsilon_abs_;    ///< threshold for absolute error decrease
+  size_t minIterations;  ///< minimum number of cg iterations
+  size_t maxIterations;  ///< maximum number of cg iterations
+  size_t reset;          ///< number of iterations before reset
+  double epsilon_rel;    ///< threshold for relative error decrease
+  double epsilon_abs;    ///< threshold for absolute error decrease
 
- public:
   /* Matrix Operation Kernel */
   enum BLASKernel {
-    GTSAM = 0, ///< Jacobian Factor Graph of GTSAM
-  } blas_kernel_ ;
+    GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
+  } blas_kernel;
 
   ConjugateGradientParameters()
-  : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3),
-    epsilon_abs_(1e-3), blas_kernel_(GTSAM) {}
+      : minIterations(1),
+        maxIterations(500),
+        reset(501),
+        epsilon_rel(1e-3),
+        epsilon_abs(1e-3),
+        blas_kernel(GTSAM) {}
 
-  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-      double epsilon_rel, double epsilon_abs, BLASKernel blas)
-  : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-    epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}
+  ConjugateGradientParameters(size_t minIterations, size_t maxIterations,
+                              size_t reset, double epsilon_rel,
+                              double epsilon_abs, BLASKernel blas)
+      : minIterations(minIterations),
+        maxIterations(maxIterations),
+        reset(reset),
+        epsilon_rel(epsilon_rel),
+        epsilon_abs(epsilon_abs),
+        blas_kernel(blas) {}
 
   ConjugateGradientParameters(const ConjugateGradientParameters &p)
-  : Base(p), minIterations_(p.minIterations_), maxIterations_(p.maxIterations_), reset_(p.reset_),
-    epsilon_rel_(p.epsilon_rel_), epsilon_abs_(p.epsilon_abs_), blas_kernel_(GTSAM) {}
+      : Base(p),
+        minIterations(p.minIterations),
+        maxIterations(p.maxIterations),
+        reset(p.reset),
+        epsilon_rel(p.epsilon_rel),
+        epsilon_abs(p.epsilon_abs),
+        blas_kernel(GTSAM) {}
 
-  /* general interface */
-  inline size_t minIterations() const { return minIterations_; }
-  inline size_t maxIterations() const { return maxIterations_; }
-  inline size_t reset() const { return reset_; }
-  inline double epsilon() const { return epsilon_rel_; }
-  inline double epsilon_rel() const { return epsilon_rel_; }
-  inline double epsilon_abs() const { return epsilon_abs_; }
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43
+  inline size_t getMinIterations() const { return minIterations; }
+  inline size_t getMaxIterations() const { return maxIterations; }
+  inline size_t getReset() const { return reset; }
+  inline double getEpsilon() const { return epsilon_rel; }
+  inline double getEpsilon_rel() const { return epsilon_rel; }
+  inline double getEpsilon_abs() const { return epsilon_abs; }
 
-  inline size_t getMinIterations() const { return minIterations_; }
-  inline size_t getMaxIterations() const { return maxIterations_; }
-  inline size_t getReset() const { return reset_; }
-  inline double getEpsilon() const { return epsilon_rel_; }
-  inline double getEpsilon_rel() const { return epsilon_rel_; }
-  inline double getEpsilon_abs() const { return epsilon_abs_; }
-
-  inline void setMinIterations(size_t value) { minIterations_ = value; }
-  inline void setMaxIterations(size_t value) { maxIterations_ = value; }
-  inline void setReset(size_t value) { reset_ = value; }
-  inline void setEpsilon(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_rel(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_abs(double value) { epsilon_abs_ = value; }
+  inline void setMinIterations(size_t value) { minIterations = value; }
+  inline void setMaxIterations(size_t value) { maxIterations = value; }
+  inline void setReset(size_t value) { reset = value; }
+  inline void setEpsilon(double value) { epsilon_rel = value; }
+  inline void setEpsilon_rel(double value) { epsilon_rel = value; }
+  inline void setEpsilon_abs(double value) { epsilon_abs = value; }
+#endif
 
   void print() const { Base::print(); }
 
@@ -111,18 +116,19 @@ V preconditionedConjugateGradient(const S &system, const V &initial,
 
   double currentGamma = system.dot(residual, residual), prevGamma, alpha, beta;
 
-  const size_t iMaxIterations = parameters.maxIterations(),
-               iMinIterations = parameters.minIterations(),
-               iReset = parameters.reset() ;
-  const double threshold = std::max(parameters.epsilon_abs(),
-      parameters.epsilon() * parameters.epsilon() * currentGamma);
+  const size_t iMaxIterations = parameters.maxIterations,
+               iMinIterations = parameters.minIterations,
+               iReset = parameters.reset;
+  const double threshold =
+      std::max(parameters.epsilon_abs,
+               parameters.epsilon_rel * parameters.epsilon_rel * currentGamma);
 
-  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY )
-    std::cout << "[PCG] epsilon = " << parameters.epsilon()
-              << ", max = " << parameters.maxIterations()
-              << ", reset = " << parameters.reset()
-              << ", ||r0||^2 = " << currentGamma
-              << ", threshold = " << threshold << std::endl;
+  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY)
+    std::cout << "[PCG] epsilon = " << parameters.epsilon_rel
+              << ", max = " << parameters.maxIterations
+              << ", reset = " << parameters.reset
+              << ", ||r0||^2 = " << currentGamma
+              << ", threshold = " << threshold << std::endl;
 
   size_t k;
   for ( k = 1 ; k <= iMaxIterations && (currentGamma > threshold || k <= iMinIterations) ; k++ ) {
diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp
index 320c3e535..c0510961f 100644
--- a/gtsam/linear/PCGSolver.cpp
+++ b/gtsam/linear/PCGSolver.cpp
@@ -34,17 +34,13 @@ namespace gtsam {
 void PCGSolverParameters::print(ostream &os) const {
   Base::print(os);
   os << "PCGSolverParameters:" << endl;
-  preconditioner_->print(os);
+  preconditioner->print(os);
 }
 
 /*****************************************************************************/
 PCGSolver::PCGSolver(const PCGSolverParameters &p) {
   parameters_ = p;
-  preconditioner_ = createPreconditioner(p.preconditioner());
-}
-
-void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner) {
-  preconditioner_ = preconditioner;
+  preconditioner_ = createPreconditioner(p.preconditioner);
 }
 
 void PCGSolverParameters::print(const std::string &s) const {
diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index cb671f288..17cc2d3db 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -33,30 +33,19 @@ struct PreconditionerParameters;
 
 /**
  * Parameters for Preconditioned Conjugate Gradient solver.
  */
-struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
- public:
+struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters {
   typedef ConjugateGradientParameters Base;
   typedef std::shared_ptr<PCGSolverParameters> shared_ptr;
 
-protected:
-  std::shared_ptr<PreconditionerParameters> preconditioner_;
+  std::shared_ptr<PreconditionerParameters> preconditioner;
 
-public:
   PCGSolverParameters() {}
 
   PCGSolverParameters(
       const std::shared_ptr<PreconditionerParameters> &preconditioner)
-      : preconditioner_(preconditioner) {}
+      : preconditioner(preconditioner) {}
 
   void print(std::ostream &os) const override;
-
-  const std::shared_ptr<PreconditionerParameters> preconditioner() const {
-    return preconditioner_;
-  }
-
-  void setPreconditionerParams(
-      const std::shared_ptr<PreconditionerParameters> preconditioner);
-
   void print(const std::string &s) const;
 };
diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
index c96d643b2..a8185b544 100644
--- a/gtsam/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -49,10 +49,12 @@ namespace gtsam {
 
     // init gamma and calculate threshold
     gamma = dot(g,g);
-    threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
+    threshold =
+        std::max(parameters_.epsilon_abs,
+                 parameters_.epsilon_rel * parameters_.epsilon_rel * gamma);
 
     // Allocate and calculate A*d for first iteration
-    if (gamma > parameters_.epsilon_abs()) Ad = Ab * d;
+    if (gamma > parameters_.epsilon_abs) Ad = Ab * d;
   }
 
   /* ************************************************************************* */
@@ -79,13 +81,13 @@ namespace gtsam {
 
     // take a step, return true if converged
     bool step(const S& Ab, V& x) {
-      if ((++k) >= ((int)parameters_.maxIterations())) return true;
+      if ((++k) >= ((int)parameters_.maxIterations)) return true;
 
       //---------------------------------->
       double alpha = takeOptimalStep(x);
 
       // update gradient (or re-calculate at reset time)
-      if (k % parameters_.reset() == 0) g = Ab.gradient(x);
+      if (k % parameters_.reset == 0) g = Ab.gradient(x);
       // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
       else Ab.transposeMultiplyAdd(alpha, Ad, g);
 
@@ -126,11 +128,10 @@ namespace gtsam {
     CGState<S, V, E> state(Ab, x, parameters, steepest);
 
     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
-      std::cout << "CG: epsilon = " << parameters.epsilon()
-                << ", maxIterations = " << parameters.maxIterations()
+      std::cout << "CG: epsilon = " << parameters.epsilon_rel
+                << ", maxIterations = " << parameters.maxIterations
                 << ", ||g0||^2 = " << state.gamma
-                << ", threshold = " << state.threshold
-                << std::endl;
+                << ", threshold = " << state.threshold << std::endl;
 
     if ( state.gamma < state.threshold ) {
       if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i
index af6c2ee22..eecc0192c 100644
--- a/gtsam/linear/linear.i
+++ b/gtsam/linear/linear.i
@@ -710,17 +710,11 @@ virtual class IterativeOptimizationParameters {
 #include <gtsam/linear/ConjugateGradientSolver.h>
 virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   ConjugateGradientParameters();
-  int getMinIterations() const ;
-  int getMaxIterations() const ;
-  int getReset() const;
-  double getEpsilon_rel() const;
-  double getEpsilon_abs() const;
-
-  void setMinIterations(int value);
-  void setMaxIterations(int value);
-  void setReset(int value);
-  void setEpsilon_rel(double value);
-  void setEpsilon_abs(double value);
+  int minIterations;
+  int maxIterations;
+  int reset;
+  double epsilon_rel;
+  double epsilon_abs;
 };
 
 #include <gtsam/linear/Preconditioner.h>
@@ -739,8 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParameters {
 #include <gtsam/linear/PCGSolver.h>
 virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
   PCGSolverParameters();
+  PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner);
   void print(string s = "");
-  void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
+
+  gtsam::PreconditionerParameters* preconditioner;
 };
 
 #include
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 14d2a705b..fd0e60bc8 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -88,24 +88,29 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
           new State(initialValues, graph.error(initialValues)))),
       params_(params) {}
 
-double NonlinearConjugateGradientOptimizer::error(const Values& state) const {
+double NonlinearConjugateGradientOptimizer::System::error(
+    const State& state) const {
   return graph_.error(state);
 }
 
-VectorValues NonlinearConjugateGradientOptimizer::gradient(
-    const Values& state) const {
+NonlinearConjugateGradientOptimizer::System::Gradient
+NonlinearConjugateGradientOptimizer::System::gradient(
+    const State& state) const {
   return gradientInPlace(graph_, state);
 }
 
-Values NonlinearConjugateGradientOptimizer::advance(
-    const Values& current, const double alpha,
-    const VectorValues& gradient) const {
-  return current.retract(alpha * gradient);
+NonlinearConjugateGradientOptimizer::System::State
+NonlinearConjugateGradientOptimizer::System::advance(const State& current,
+                                                     const double alpha,
+                                                     const Gradient& g) const {
+  Gradient step = g;
+  step *= alpha;
+  return current.retract(step);
 }
 
 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
-  const auto [newValues, dummy] = nonlinearConjugateGradient(
-      state_->values, params_, true /* single iteration */);
+  const auto [newValues, dummy] = nonlinearConjugateGradient(
+      System(graph_), state_->values, params_, true /* single iteration */);
 
   state_.reset(
       new State(newValues, graph_.error(newValues), state_->iterations + 1));
@@ -115,148 +120,12 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
 
 const Values& NonlinearConjugateGradientOptimizer::optimize() {
   // Optimize until convergence
+  System system(graph_);
   const auto [newValues, iterations] =
-      nonlinearConjugateGradient(state_->values, params_, false);
+      nonlinearConjugateGradient(system, state_->values, params_, false);
   state_.reset(
       new State(std::move(newValues), graph_.error(newValues), iterations));
   return state_->values;
 }
 
-double NonlinearConjugateGradientOptimizer::lineSearch(
-    const Values& currentValues, const VectorValues& gradient) const {
-  /* normalize it such that it becomes a unit vector */
-  const double g = gradient.norm();
-
-  // perform the golden section search algorithm to decide the the optimal
-  // step size detail refer to
-  // http://en.wikipedia.org/wiki/Golden_section_search
-  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
-               tau = 1e-5;
-  double minStep = -1.0 / g, maxStep = 0,
-         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
-
-  Values newValues = advance(currentValues, newStep, gradient);
-  double newError = error(newValues);
-
-  while (true) {
-    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
-                                 : newStep - resphi * (newStep - minStep);
-
-    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
-      return 0.5 * (minStep + maxStep);
-    }
-
-    const Values testValues = advance(currentValues, testStep, gradient);
-    const double testError = error(testValues);
-
-    // update the working range
-    if (testError >= newError) {
-      if (flag)
-        maxStep = testStep;
-      else
-        minStep = testStep;
-    } else {
-      if (flag) {
-        minStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      } else {
-        maxStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      }
-    }
-  }
-  return 0.0;
-}
-
-std::tuple<Values, int>
-NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient(
-    const Values& initial, const NonlinearOptimizerParams& params,
-    const bool singleIteration, const bool gradientDescent) const {
-  size_t iteration = 0;
-
-  // check if we're already close enough
-  double currentError = error(initial);
-  if (currentError <= params.errorTol) {
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
-      std::cout << "Exiting, as error = " << currentError << " < "
-                << params.errorTol << std::endl;
-    }
-    return {initial, iteration};
-  }
-
-  Values currentValues = initial;
-  VectorValues currentGradient = gradient(currentValues), prevGradient,
-               direction = currentGradient;
-
-  /* do one step of gradient descent */
-  Values prevValues = currentValues;
-  double prevError = currentError;
-  double alpha = lineSearch(currentValues, direction);
-  currentValues = advance(prevValues, alpha, direction);
-  currentError = error(currentValues);
-
-  // Maybe show output
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-    std::cout << "Initial error: " << currentError << std::endl;
-
-  // Iterative loop
-  do {
-    if (gradientDescent == true) {
-      direction = gradient(currentValues);
-    } else {
-      prevGradient = currentGradient;
-      currentGradient = gradient(currentValues);
-      double beta;
-
-      switch (directionMethod_) {
-        case DirectionMethod::FletcherReeves:
-          beta = FletcherReeves(currentGradient, prevGradient);
-          break;
-        case DirectionMethod::PolakRibiere:
-          beta = PolakRibiere(currentGradient, prevGradient);
-          break;
-        case DirectionMethod::HestenesStiefel:
-          beta = HestenesStiefel(currentGradient, prevGradient, direction);
-          break;
-        case DirectionMethod::DaiYuan:
-          beta = DaiYuan(currentGradient, prevGradient, direction);
-          break;
-      }
-      direction = currentGradient + (beta * direction);
-    }
-
-    alpha = lineSearch(currentValues, direction);
-
-    prevValues = currentValues;
-    prevError = currentError;
-
-    currentValues = advance(prevValues, alpha, direction);
-    currentError = error(currentValues);
-
-    // User hook:
-    if (params.iterationHook)
-      params.iterationHook(iteration, prevError, currentError);
-
-    // Maybe show output
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "iteration: " << iteration
-                << ", currentError: " << currentError << std::endl;
-  } while (++iteration < params.maxIterations && !singleIteration &&
-           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-                             params.errorTol, prevError, currentError,
-                             params.verbosity));
-
-  // Printing if verbose
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
-      iteration >= params.maxIterations)
-    std::cout << "nonlinearConjugateGradient: Terminating because reached "
-                 "maximum iterations"
-              << std::endl;
-
-  return {currentValues, iteration};
-}
-
 } /* namespace gtsam */
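Note (editor's sketch, not part of the patch): the header change below turns lineSearch and nonlinearConjugateGradient into free function templates over a system type S with error(state), gradient(state) and advance(state, step-size, direction). A minimal sketch of a type satisfying that concept, using a toy quadratic objective over Eigen::Vector2d; the QuadraticSystem name and the example values are illustrative assumptions only.

#include <Eigen/Core>
#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

// Hypothetical example type: minimizes f(x) = 0.5 * ||x - target||^2.
struct QuadraticSystem {
  typedef Eigen::Vector2d State;     // plays the role of V in the template
  typedef Eigen::Vector2d Gradient;  // required typename S::Gradient

  Eigen::Vector2d target;

  // Objective value at x.
  double error(const State& x) const { return 0.5 * (x - target).squaredNorm(); }

  // Ascent gradient of the error; the golden-section line search then picks a
  // negative step along it.
  Gradient gradient(const State& x) const { return x - target; }

  // Move from x along direction d by step alpha.
  State advance(const State& x, double alpha, const Gradient& d) const {
    return x + alpha * d;
  }
};

// Usage sketch:
//   QuadraticSystem system{Eigen::Vector2d(1.0, -2.0)};
//   Eigen::Vector2d initial = Eigen::Vector2d::Zero();
//   gtsam::NonlinearOptimizerParams params;
//   const auto [solution, iterations] =
//       gtsam::nonlinearConjugateGradient(system, initial, params, false);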
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 63ec4f6ea..78f11a600 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -26,6 +26,24 @@ namespace gtsam {
 /** An implementation of the nonlinear CG method using the template below */
 class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
     : public NonlinearOptimizer {
+  /* a class for the nonlinearConjugateGradient template */
+  class System {
+   public:
+    typedef Values State;
+    typedef VectorValues Gradient;
+    typedef NonlinearOptimizerParams Parameters;
+
+   protected:
+    const NonlinearFactorGraph &graph_;
+
+   public:
+    System(const NonlinearFactorGraph &graph) : graph_(graph) {}
+    double error(const State &state) const;
+    Gradient gradient(const State &state) const;
+    State advance(const State &current, const double alpha,
+                  const Gradient &g) const;
+  };
+
  public:
   typedef NonlinearOptimizer Base;
   typedef NonlinearOptimizerParams Parameters;
@@ -54,13 +72,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
   /// Destructor
   ~NonlinearConjugateGradientOptimizer() override {}
 
-  double error(const Values &state) const;
-
-  VectorValues gradient(const Values &state) const;
-
-  Values advance(const Values &current, const double alpha,
-                 const VectorValues &g) const;
-
   /**
    * Perform a single iteration, returning GaussianFactorGraph corresponding to
    * the linearized factor graph.
@@ -72,22 +83,145 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
    * variable assignments.
    */
   const Values &optimize() override;
-
-  /** Implement the golden-section line search algorithm */
-  double lineSearch(const Values &currentValues,
-                    const VectorValues &gradient) const;
-
-  /**
-   * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
-   * formula suggested in
-   * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
-   *
-   * The last parameter is a switch between gradient-descent and conjugate
-   * gradient
-   */
-  std::tuple<Values, int> nonlinearConjugateGradient(
-      const Values &initial, const NonlinearOptimizerParams &params,
-      const bool singleIteration, const bool gradientDescent = false) const;
 };
 
+/** Implement the golden-section line search algorithm */
+template <class S, class V, class W>
+double lineSearch(const S &system, const V currentValues, const W &gradient) {
+  /* normalize it such that it becomes a unit vector */
+  const double g = gradient.norm();
+
+  // perform the golden section search algorithm to decide the the optimal step
+  // size detail refer to http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+               tau = 1e-5;
+  double minStep = -1.0 / g, maxStep = 0,
+         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
+
+  V newValues = system.advance(currentValues, newStep, gradient);
+  double newError = system.error(newValues);
+
+  while (true) {
+    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
+    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                 : newStep - resphi * (newStep - minStep);
+
+    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
+      return 0.5 * (minStep + maxStep);
+    }
+
+    const V testValues = system.advance(currentValues, testStep, gradient);
+    const double testError = system.error(testValues);
+
+    // update the working range
+    if (testError >= newError) {
+      if (flag)
+        maxStep = testStep;
+      else
+        minStep = testStep;
+    } else {
+      if (flag) {
+        minStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      } else {
+        maxStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      }
+    }
+  }
+  return 0.0;
+}
+
+/**
+ * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
+ * formula suggested in
+ * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
+ *
+ * The S (system) class requires three member functions: error(state),
+ * gradient(state) and advance(state, step-size, direction). The V class
+ * denotes the state or the solution.
+ *
+ * The last parameter is a switch between gradient-descent and conjugate
+ * gradient
+ */
+template <class S, class V>
+std::tuple<V, int> nonlinearConjugateGradient(
+    const S &system, const V &initial, const NonlinearOptimizerParams &params,
+    const bool singleIteration, const bool gradientDescent = false) {
+  // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
+
+  size_t iteration = 0;
+
+  // check if we're already close enough
+  double currentError = system.error(initial);
+  if (currentError <= params.errorTol) {
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
+      std::cout << "Exiting, as error = " << currentError << " < "
+                << params.errorTol << std::endl;
+    }
+    return {initial, iteration};
+  }
+
+  V currentValues = initial;
+  typename S::Gradient currentGradient = system.gradient(currentValues),
+                       prevGradient, direction = currentGradient;
+
+  /* do one step of gradient descent */
+  V prevValues = currentValues;
+  double prevError = currentError;
+  double alpha = lineSearch(system, currentValues, direction);
+  currentValues = system.advance(prevValues, alpha, direction);
+  currentError = system.error(currentValues);
+
+  // Maybe show output
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+    std::cout << "Initial error: " << currentError << std::endl;
+
+  // Iterative loop
+  do {
+    if (gradientDescent == true) {
+      direction = system.gradient(currentValues);
+    } else {
+      prevGradient = currentGradient;
+      currentGradient = system.gradient(currentValues);
+      // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
+      const double beta =
+          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                            prevGradient.dot(prevGradient));
+      direction = currentGradient + (beta * direction);
+    }
+
+    alpha = lineSearch(system, currentValues, direction);
+
+    prevValues = currentValues;
+    prevError = currentError;
+
+    currentValues = system.advance(prevValues, alpha, direction);
+    currentError = system.error(currentValues);
+
+    // User hook:
+    if (params.iterationHook)
+      params.iterationHook(iteration, prevError, currentError);
+
+    // Maybe show output
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      std::cout << "iteration: " << iteration
+                << ", currentError: " << currentError << std::endl;
+  } while (++iteration < params.maxIterations && !singleIteration &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                             params.errorTol, prevError, currentError,
+                             params.verbosity));
+
+  // Printing if verbose
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      iteration >= params.maxIterations)
+    std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                 "maximum iterations"
+              << std::endl;
+
+  return {currentValues, iteration};
+}
+
 }  // namespace gtsam
diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py
index 1b4209509..2b276425f 100644
--- a/python/gtsam/tests/test_NonlinearOptimizer.py
+++ b/python/gtsam/tests/test_NonlinearOptimizer.py
@@ -69,7 +69,7 @@ class TestScenario(GtsamTestCase):
         lmParams = LevenbergMarquardtParams.CeresDefaults()
         lmParams.setLinearSolverType("ITERATIVE")
         cgParams = PCGSolverParameters()
-        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
+        cgParams.preconditioner = DummyPreconditionerParameters()
         lmParams.setIterativeParams(cgParams)
         actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
         self.assertAlmostEqual(0, self.fg.error(actual))
diff --git a/tests/testIterative.cpp b/tests/testIterative.cpp
index 6d9918b76..4df598f80 100644
--- a/tests/testIterative.cpp
+++ b/tests/testIterative.cpp
@@ -95,9 +95,9 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint )
   VectorValues zeros = config.zeroVectors();
 
   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);
 
   VectorValues expected;
@@ -122,9 +122,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
   VectorValues zeros = config.zeroVectors();
 
   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);
 
   VectorValues expected;
diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp
index 9eff3e7f0..967584b7d 100644
--- a/tests/testPreconditioner.cpp
+++ b/tests/testPreconditioner.cpp
@@ -54,23 +54,23 @@ TEST( PCGsolver, verySimpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");
 
   // With Dummy preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::DummyPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7));
   //deltaPCGDummy.print("PCG Dummy");
 
   // With Block-Jacobi preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   // It takes more than 1000 iterations for this test
-  pcg->setMaxIterations(1500);
+  pcg->maxIterations = 1500;
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5));
 
@@ -107,21 +107,21 @@ TEST(PCGSolver, simpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");
 
   // With Dummy preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::DummyPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
   //deltaPCGDummy.print("PCG Dummy");
 
   // With Block-Jacobi preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
   //deltaPCGJacobi.print("PCG Jacobi");
diff --git a/tests/testSubgraphSolver.cpp b/tests/testSubgraphSolver.cpp
index 026f3c919..e31ce23b5 100644
--- a/tests/testSubgraphSolver.cpp
+++ b/tests/testSubgraphSolver.cpp
@@ -48,7 +48,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) {
 TEST( SubgraphSolver, Parameters )
 {
   LONGS_EQUAL(SubgraphSolverParameters::SILENT, kParameters.verbosity());
-  LONGS_EQUAL(500, kParameters.maxIterations());
+  LONGS_EQUAL(500, kParameters.maxIterations);
 }
 
 /* ************************************************************************* */
diff --git a/timing/timeShonanFactor.cpp b/timing/timeShonanFactor.cpp
index fc97d01a8..3cdb596cd 100644
--- a/timing/timeShonanFactor.cpp
+++ b/timing/timeShonanFactor.cpp
@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
   // params.setVerbosityLM("SUMMARY");
   // params.linearSolverType = LevenbergMarquardtParams::Iterative;
   // auto pcg = std::make_shared();
-  // pcg->preconditioner_ =
+  // pcg->preconditioner =
   //     std::make_shared();
   //     std::make_shared();
   // params.iterativeParams = pcg;
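Note (editor's sketch, not part of the patch): after this change, solver options are plain public fields rather than set*/get* methods. A minimal configuration sketch mirroring the updated example and tests above; the configurePCG helper name, the tolerance values, and the choice of DummyPreconditionerParameters are illustrative assumptions, not recommendations.

#include <memory>

#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/Preconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>

void configurePCG(gtsam::LevenbergMarquardtParams& params) {
  auto pcg = std::make_shared<gtsam::PCGSolverParameters>();
  // Fields are assigned directly instead of calling setMaxIterations() etc.
  pcg->maxIterations = 500;
  pcg->epsilon_rel = 1e-10;
  pcg->epsilon_abs = 1e-10;
  pcg->preconditioner =
      std::make_shared<gtsam::DummyPreconditionerParameters>();
  // Hand the iterative-solver settings to the outer LM optimizer.
  params.linearSolverType = gtsam::LevenbergMarquardtParams::Iterative;
  params.iterativeParams = pcg;
}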