From 4a0257c0ce720a8202d7e96798347cfafb4cd1f0 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 14 Oct 2024 16:51:43 -0400 Subject: [PATCH 01/20] move test file to nonlinear directory --- .../nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/testGradientDescentOptimizer.cpp => gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp (100%) diff --git a/tests/testGradientDescentOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp similarity index 100% rename from tests/testGradientDescentOptimizer.cpp rename to gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp From 322a23d49c9e26d00cf546c7c95e52997983f523 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 14 Oct 2024 21:01:31 -0400 Subject: [PATCH 02/20] clean up and formatting --- ...estNonlinearConjugateGradientOptimizer.cpp | 61 +++++++++---------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index d7ca70459..36673c7a0 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -1,56 +1,52 @@ /** - * @file NonlinearConjugateGradientOptimizer.cpp - * @brief Test simple CG optimizer + * @file testNonlinearConjugateGradientOptimizer.cpp + * @brief Test nonlinear CG optimizer * @author Yong-Dian Jian + * @author Varun Agrawal * @date June 11, 2012 */ -/** - * @file testGradientDescentOptimizer.cpp - * @brief Small test of NonlinearConjugateGradientOptimizer - * @author Yong-Dian Jian - * @date Jun 11, 2012 - */ - -#include +#include +#include #include #include #include -#include - -#include - +#include using namespace std; using namespace gtsam; // Generate a small PoseSLAM problem std::tuple generateProblem() { - // 1. Create graph container and add factors to it NonlinearFactorGraph graph; // 2a. Add Gaussian prior - Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin - SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas( - Vector3(0.3, 0.3, 0.1)); + Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin + SharedDiagonal priorNoise = + noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1)); graph.addPrior(1, priorMean, priorNoise); // 2b. Add odometry factors - SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas( - Vector3(0.2, 0.2, 0.1)); - graph.emplace_shared>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise); - graph.emplace_shared>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise); - graph.emplace_shared>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise); - graph.emplace_shared>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise); + SharedDiagonal odometryNoise = + noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); + graph.emplace_shared>(1, 2, Pose2(2.0, 0.0, 0.0), + odometryNoise); + graph.emplace_shared>(2, 3, Pose2(2.0, 0.0, M_PI_2), + odometryNoise); + graph.emplace_shared>(3, 4, Pose2(2.0, 0.0, M_PI_2), + odometryNoise); + graph.emplace_shared>(4, 5, Pose2(2.0, 0.0, M_PI_2), + odometryNoise); // 2c. Add pose constraint - SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas( - Vector3(0.2, 0.2, 0.1)); + SharedDiagonal constraintUncertainty = + noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); graph.emplace_shared>(5, 2, Pose2(2.0, 0.0, M_PI_2), - constraintUncertainty); + constraintUncertainty); - // 3. 
Create the data structure to hold the initialEstimate estimate to the solution
+  // 3. Create the data structure to hold the initialEstimate estimate to the
+  // solution
   Values initialEstimate;
   Pose2 x1(0.5, 0.0, 0.2);
   initialEstimate.insert(1, x1);
@@ -68,16 +64,17 @@ std::tuple<NonlinearFactorGraph, Values> generateProblem() {
 
 /* ************************************************************************* */
 TEST(NonlinearConjugateGradientOptimizer, Optimize) {
-const auto [graph, initialEstimate] = generateProblem();
-// cout << "initial error = " << graph.error(initialEstimate) << endl;
+  const auto [graph, initialEstimate] = generateProblem();
+  // cout << "initial error = " << graph.error(initialEstimate) << endl;
 
   NonlinearOptimizerParams param;
-  param.maxIterations = 500; /* requires a larger number of iterations to converge */
+  param.maxIterations =
+      500; /* requires a larger number of iterations to converge */
   param.verbosity = NonlinearOptimizerParams::SILENT;
 
   NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
   Values result = optimizer.optimize();
-// cout << "cg final error = " << graph.error(result) << endl;
+  // cout << "cg final error = " << graph.error(result) << endl;
 
   EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
 }

From 91d637b0928b6ef9426b66d079852fd732c07943 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Mon, 14 Oct 2024 21:16:31 -0400
Subject: [PATCH 03/20] improve PCGSolverParameters

---
 gtsam/linear/PCGSolver.cpp    |  2 +-
 gtsam/linear/PCGSolver.h      | 26 +++++++++++++++-----------
 gtsam/sfm/ShonanAveraging.cpp | 11 +++--------
 3 files changed, 19 insertions(+), 20 deletions(-)

diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp
index 5ec6fa67b..320c3e535 100644
--- a/gtsam/linear/PCGSolver.cpp
+++ b/gtsam/linear/PCGSolver.cpp
@@ -40,7 +40,7 @@ void PCGSolverParameters::print(ostream &os) const {
 /*****************************************************************************/
 PCGSolver::PCGSolver(const PCGSolverParameters &p) {
   parameters_ = p;
-  preconditioner_ = createPreconditioner(p.preconditioner_);
+  preconditioner_ = createPreconditioner(p.preconditioner());
 }
 
 void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner) {
diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index 5ed2c7c6d..35f6bdacb 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -34,26 +34,30 @@ struct PreconditionerParameters;
 /**
  * Parameters for PCG
  */
 struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
-public:
+ public:
   typedef ConjugateGradientParameters Base;
   typedef std::shared_ptr<PCGSolverParameters> shared_ptr;
 
-  PCGSolverParameters() {
-  }
+protected:
+  std::shared_ptr<PreconditionerParameters> preconditioner_;
+
+public:
+  PCGSolverParameters() {}
+
+  PCGSolverParameters(
+      const std::shared_ptr<PreconditionerParameters> &preconditioner)
+      : preconditioner_(preconditioner) {}
 
   void print(std::ostream &os) const override;
 
-  /* interface to preconditioner parameters */
-  inline const PreconditionerParameters& preconditioner() const {
-    return *preconditioner_;
+  const std::shared_ptr<PreconditionerParameters> preconditioner() const {
+    return preconditioner_;
   }
 
-  // needed for python wrapper
+  void setPreconditionerParams(
+      const std::shared_ptr<PreconditionerParameters> preconditioner);
+
   void print(const std::string &s) const;
-
-  std::shared_ptr<PreconditionerParameters> preconditioner_;
-
-  void setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner);
 };
 
 /**
diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp
index 7c8b07f37..830f1c719 100644
--- a/gtsam/sfm/ShonanAveraging.cpp
+++ b/gtsam/sfm/ShonanAveraging.cpp
@@ -67,20 +67,15 @@ ShonanAveragingParameters::ShonanAveragingParameters(
   builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
   builderParameters.augmentationFactor = 0.0;
 
-  auto pcg = std::make_shared<PCGSolverParameters>();
-
   // Choose optimization method
   if (method == "SUBGRAPH") {
     lm.iterativeParams =
         std::make_shared<SubgraphSolverParameters>(builderParameters);
   } else if (method == "SGPC") {
-    pcg->preconditioner_ =
-        std::make_shared<SubgraphPreconditionerParameters>(builderParameters);
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
+        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
   } else if (method == "JACOBI") {
-    pcg->preconditioner_ =
-        std::make_shared<BlockJacobiPreconditionerParameters>();
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(std::make_shared<BlockJacobiPreconditionerParameters>());
   } else if (method == "QR") {
     lm.setLinearSolverType("MULTIFRONTAL_QR");
   } else if (method == "CHOLESKY") {

From 1b422d5c98632598e726a9525520ee5046104522 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 06:54:00 -0400
Subject: [PATCH 04/20] rearrange GaussianFactorGraphSystem

---
 gtsam/linear/PCGSolver.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index 35f6bdacb..a6a16f722 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -91,16 +91,16 @@ public:
 * System class needed for calling preconditionedConjugateGradient
 */
 class GTSAM_EXPORT GaussianFactorGraphSystem {
-public:
+  GaussianFactorGraph gfg_;
+  Preconditioner preconditioner_;
+  KeyInfo keyInfo_;
+  std::map<Key, Vector> lambda_;
+ public:
 
   GaussianFactorGraphSystem(const GaussianFactorGraph &gfg,
-      const Preconditioner &preconditioner, const KeyInfo &info,
-      const std::map<Key, Vector> &lambda);
-
-  const GaussianFactorGraph &gfg_;
-  const Preconditioner &preconditioner_;
-  const KeyInfo &keyInfo_;
-  const std::map<Key, Vector> &lambda_;
+                            const Preconditioner &preconditioner,
+                            const KeyInfo &info,
+                            const std::map<Key, Vector> &lambda);
 
   void residual(const Vector &x, Vector &r) const;
   void multiply(const Vector &x, Vector& y) const;

From ec8714a54896ff60cb48d340998b19b8f0336456 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 08:40:20 -0400
Subject: [PATCH 05/20] restrict pointer access via methods

---
 tests/testPCGSolver.cpp      | 13 ++++++-------
 tests/testPreconditioner.cpp | 12 ++++++++----
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/tests/testPCGSolver.cpp b/tests/testPCGSolver.cpp
index bc9f9e608..039864886 100644
--- a/tests/testPCGSolver.cpp
+++ b/tests/testPCGSolver.cpp
@@ -125,8 +125,8 @@ TEST( GaussianFactorGraphSystem, multiply_getb)
 TEST(PCGSolver, dummy) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<DummyPreconditionerParameters>());
   params.iterativeParams = pcg;
 
   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -145,9 +145,8 @@ TEST(PCGSolver, dummy) {
 TEST(PCGSolver, blockjacobi) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   params.iterativeParams = pcg;
 
   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -166,8 +165,8 @@ TEST(PCGSolver, blockjacobi) {
 TEST(PCGSolver, subgraph) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<SubgraphPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<SubgraphPreconditionerParameters>());
   params.iterativeParams = pcg;
 
   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp
index ecdf36322..9eff3e7f0 100644
--- a/tests/testPreconditioner.cpp
+++ b/tests/testPreconditioner.cpp
@@ -60,13 +60,15 @@ TEST( PCGsolver, verySimpleLinearSystem) {
   //pcg->setVerbosity("ERROR");
 
   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<DummyPreconditionerParameters>());
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7));
   //deltaPCGDummy.print("PCG Dummy");
 
   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   // It takes more than 1000 iterations for this test
   pcg->setMaxIterations(1500);
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
@@ -111,13 +113,15 @@ TEST(PCGSolver, simpleLinearSystem) {
   //pcg->setVerbosity("ERROR");
 
   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<DummyPreconditionerParameters>());
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
   //deltaPCGDummy.print("PCG Dummy");
 
   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
   //deltaPCGJacobi.print("PCG Jacobi");

From a0c0cd1fced607d6c09c9ddb3f62e0c2f34ed7c3 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 09:14:47 -0400
Subject: [PATCH 06/20] improved docstrings

---
 gtsam/linear/ConjugateGradientSolver.h | 6 ++++--
 gtsam/linear/PCGSolver.h               | 6 +++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index b09b1a352..ccdab45da 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -24,20 +24,22 @@ namespace gtsam {
 
 /**
- * parameters for the conjugate gradient method
+ * Parameters for the Conjugate Gradient method
 */
 class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
 
-public:
+ public:
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;
 
+ protected:
   size_t minIterations_;  ///< minimum number of cg iterations
   size_t maxIterations_;  ///< maximum number of cg iterations
   size_t reset_;          ///< number of iterations before reset
   double epsilon_rel_;    ///< threshold for relative error decrease
   double epsilon_abs_;    ///< threshold for absolute error decrease
 
+ public:
   /* Matrix Operation Kernel */
   enum BLASKernel {
     GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index a6a16f722..cb671f288 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -31,7 +31,7 @@ class VectorValues;
 struct PreconditionerParameters;
 
 /**
- * Parameters for PCG
+ * Parameters for Preconditioned Conjugate Gradient solver.
*/ struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters { public: @@ -91,8 +91,8 @@ public: * System class needed for calling preconditionedConjugateGradient */ class GTSAM_EXPORT GaussianFactorGraphSystem { - GaussianFactorGraph gfg_; - Preconditioner preconditioner_; + const GaussianFactorGraph &gfg_; + const Preconditioner &preconditioner_; KeyInfo keyInfo_; std::map lambda_; From a94169a973e42d77c752d4d4bcf8f2ffd56d8693 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 09:59:16 -0400 Subject: [PATCH 07/20] remove unnecessary typedefs --- .../NonlinearConjugateGradientOptimizer.cpp | 24 ++++++++++--------- .../NonlinearConjugateGradientOptimizer.h | 21 ++++++++-------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 0b1a43815..403c72908 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -42,24 +42,26 @@ static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg, } NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( - const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params) - : Base(graph, std::unique_ptr(new State(initialValues, graph.error(initialValues)))), - params_(params) {} + const NonlinearFactorGraph& graph, const Values& initialValues, + const Parameters& params) + : Base(graph, std::unique_ptr( + new State(initialValues, graph.error(initialValues)))), + params_(params) {} -double NonlinearConjugateGradientOptimizer::System::error(const State& state) const { +double NonlinearConjugateGradientOptimizer::System::error( + const Values& state) const { return graph_.error(state); } -NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient( - const State &state) const { +VectorValues NonlinearConjugateGradientOptimizer::System::gradient( + const Values& state) const { return gradientInPlace(graph_, state); } -NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance( - const State ¤t, const double alpha, const Gradient &g) const { - Gradient step = g; - step *= alpha; - return current.retract(step); +Values NonlinearConjugateGradientOptimizer::System::advance( + const Values& current, const double alpha, + const VectorValues& gradient) const { + return current.retract(alpha * gradient); } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 3dd6c7e05..26f596b06 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -29,8 +29,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz /* a class for the nonlinearConjugateGradient template */ class System { public: - typedef Values State; - typedef VectorValues Gradient; typedef NonlinearOptimizerParams Parameters; protected: @@ -40,10 +38,10 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz System(const NonlinearFactorGraph &graph) : graph_(graph) { } - double error(const State &state) const; - Gradient gradient(const State &state) const; - State advance(const State ¤t, const double alpha, - const Gradient &g) const; + double 
error(const Values &state) const; + VectorValues gradient(const Values &state) const; + Values advance(const Values ¤t, const double alpha, + const VectorValues &g) const; }; public: @@ -52,7 +50,7 @@ public: typedef NonlinearOptimizerParams Parameters; typedef std::shared_ptr shared_ptr; -protected: + protected: Parameters params_; const NonlinearOptimizerParams& _params() const override { @@ -62,8 +60,9 @@ protected: public: /// Constructor - NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph, - const Values& initialValues, const Parameters& params = Parameters()); + NonlinearConjugateGradientOptimizer( + const NonlinearFactorGraph &graph, const Values &initialValues, + const Parameters ¶ms = Parameters()); /// Destructor ~NonlinearConjugateGradientOptimizer() override { @@ -163,8 +162,8 @@ std::tuple nonlinearConjugateGradient(const S &system, } V currentValues = initial; - typename S::Gradient currentGradient = system.gradient(currentValues), - prevGradient, direction = currentGradient; + VectorValues currentGradient = system.gradient(currentValues), prevGradient, + direction = currentGradient; /* do one step of gradient descent */ V prevValues = currentValues; From ba6e2b8d7f925cbd5ddcc3d6a77897cc3cd4dc1c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 10:06:37 -0400 Subject: [PATCH 08/20] remove System inside NonlinearConjugateGradientOptimizer --- .../NonlinearConjugateGradientOptimizer.cpp | 19 +- .../NonlinearConjugateGradientOptimizer.h | 284 +++++++++--------- 2 files changed, 145 insertions(+), 158 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 403c72908..40bd76a88 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -48,26 +48,27 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( new State(initialValues, graph.error(initialValues)))), params_(params) {} -double NonlinearConjugateGradientOptimizer::System::error( +double NonlinearConjugateGradientOptimizer::error( const Values& state) const { return graph_.error(state); } -VectorValues NonlinearConjugateGradientOptimizer::System::gradient( +VectorValues NonlinearConjugateGradientOptimizer::gradient( const Values& state) const { return gradientInPlace(graph_, state); } -Values NonlinearConjugateGradientOptimizer::System::advance( +Values NonlinearConjugateGradientOptimizer::advance( const Values& current, const double alpha, const VectorValues& gradient) const { return current.retract(alpha * gradient); } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { - const auto [newValues, dummy] = nonlinearConjugateGradient( - System(graph_), state_->values, params_, true /* single iteration */); - state_.reset(new State(newValues, graph_.error(newValues), state_->iterations + 1)); + const auto [newValues, dummy] = nonlinearConjugateGradient( + state_->values, params_, true /* single iteration */); + state_.reset( + new State(newValues, graph_.error(newValues), state_->iterations + 1)); // NOTE(frank): We don't linearize this system, so we must return null here. 
return nullptr; @@ -75,10 +76,10 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { const Values& NonlinearConjugateGradientOptimizer::optimize() { // Optimize until convergence - System system(graph_); const auto [newValues, iterations] = - nonlinearConjugateGradient(system, state_->values, params_, false); - state_.reset(new State(std::move(newValues), graph_.error(newValues), iterations)); + nonlinearConjugateGradient(state_->values, params_, false); + state_.reset( + new State(std::move(newValues), graph_.error(newValues), iterations)); return state_->values; } diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 26f596b06..4a4eb22c5 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -24,28 +24,9 @@ namespace gtsam { /** An implementation of the nonlinear CG method using the template below */ -class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer { - - /* a class for the nonlinearConjugateGradient template */ - class System { - public: - typedef NonlinearOptimizerParams Parameters; - - protected: - const NonlinearFactorGraph &graph_; - - public: - System(const NonlinearFactorGraph &graph) : - graph_(graph) { - } - double error(const Values &state) const; - VectorValues gradient(const Values &state) const; - Values advance(const Values ¤t, const double alpha, - const VectorValues &g) const; - }; - -public: - +class GTSAM_EXPORT NonlinearConjugateGradientOptimizer + : public NonlinearOptimizer { + public: typedef NonlinearOptimizer Base; typedef NonlinearOptimizerParams Parameters; typedef std::shared_ptr shared_ptr; @@ -53,20 +34,23 @@ public: protected: Parameters params_; - const NonlinearOptimizerParams& _params() const override { - return params_; - } - -public: + const NonlinearOptimizerParams &_params() const override { return params_; } + public: /// Constructor - NonlinearConjugateGradientOptimizer( - const NonlinearFactorGraph &graph, const Values &initialValues, - const Parameters ¶ms = Parameters()); + NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph &graph, + const Values &initialValues, + const Parameters ¶ms = Parameters()); /// Destructor - ~NonlinearConjugateGradientOptimizer() override { - } + ~NonlinearConjugateGradientOptimizer() override {} + + double error(const Values &state) const; + + VectorValues gradient(const Values &state) const; + + Values advance(const Values ¤t, const double alpha, + const VectorValues &g) const; /** * Perform a single iteration, returning GaussianFactorGraph corresponding to @@ -79,145 +63,147 @@ public: * variable assignments. 
*/ const Values& optimize() override; -}; -/** Implement the golden-section line search algorithm */ -template -double lineSearch(const S &system, const V currentValues, const W &gradient) { + /** Implement the golden-section line search algorithm */ + template + double lineSearch(const V currentValues, const W &gradient) { + /* normalize it such that it becomes a unit vector */ + const double g = gradient.norm(); - /* normalize it such that it becomes a unit vector */ - const double g = gradient.norm(); + // perform the golden section search algorithm to decide the the optimal + // step size detail refer to + // http://en.wikipedia.org/wiki/Golden_section_search + const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, + tau = 1e-5; + double minStep = -1.0 / g, maxStep = 0, + newStep = minStep + (maxStep - minStep) / (phi + 1.0); - // perform the golden section search algorithm to decide the the optimal step size - // detail refer to http://en.wikipedia.org/wiki/Golden_section_search - const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, tau = - 1e-5; - double minStep = -1.0 / g, maxStep = 0, newStep = minStep - + (maxStep - minStep) / (phi + 1.0); + V newValues = advance(currentValues, newStep, gradient); + double newError = error(newValues); - V newValues = system.advance(currentValues, newStep, gradient); - double newError = system.error(newValues); + while (true) { + const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; + const double testStep = flag ? newStep + resphi * (maxStep - newStep) + : newStep - resphi * (newStep - minStep); - while (true) { - const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; - const double testStep = - flag ? newStep + resphi * (maxStep - newStep) : - newStep - resphi * (newStep - minStep); + if ((maxStep - minStep) < + tau * (std::abs(testStep) + std::abs(newStep))) { + return 0.5 * (minStep + maxStep); + } - if ((maxStep - minStep) - < tau * (std::abs(testStep) + std::abs(newStep))) { - return 0.5 * (minStep + maxStep); - } + const V testValues = advance(currentValues, testStep, gradient); + const double testError = error(testValues); - const V testValues = system.advance(currentValues, testStep, gradient); - const double testError = system.error(testValues); - - // update the working range - if (testError >= newError) { - if (flag) - maxStep = testStep; - else - minStep = testStep; - } else { - if (flag) { - minStep = newStep; - newStep = testStep; - newError = testError; + // update the working range + if (testError >= newError) { + if (flag) + maxStep = testStep; + else + minStep = testStep; } else { - maxStep = newStep; - newStep = testStep; - newError = testError; + if (flag) { + minStep = newStep; + newStep = testStep; + newError = testError; + } else { + maxStep = newStep; + newStep = testStep; + newError = testError; + } } } - } - return 0.0; -} - -/** - * Implement the nonlinear conjugate gradient method using the Polak-Ribiere formula suggested in - * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. - * - * The S (system) class requires three member functions: error(state), gradient(state) and - * advance(state, step-size, direction). The V class denotes the state or the solution. 
- * - * The last parameter is a switch between gradient-descent and conjugate gradient - */ -template -std::tuple nonlinearConjugateGradient(const S &system, - const V &initial, const NonlinearOptimizerParams ¶ms, - const bool singleIteration, const bool gradientDescent = false) { - - // GTSAM_CONCEPT_MANIFOLD_TYPE(V) - - size_t iteration = 0; - - // check if we're already close enough - double currentError = system.error(initial); - if (currentError <= params.errorTol) { - if (params.verbosity >= NonlinearOptimizerParams::ERROR) { - std::cout << "Exiting, as error = " << currentError << " < " - << params.errorTol << std::endl; - } - return {initial, iteration}; + return 0.0; } - V currentValues = initial; - VectorValues currentGradient = system.gradient(currentValues), prevGradient, - direction = currentGradient; + /** + * Implement the nonlinear conjugate gradient method using the Polak-Ribiere + * formula suggested in + * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. + * + * The V class + * denotes the state or the solution. + * + * The last parameter is a switch between gradient-descent and conjugate + * gradient + */ + template + std::tuple nonlinearConjugateGradient( + const V &initial, const NonlinearOptimizerParams ¶ms, + const bool singleIteration, const bool gradientDescent = false) { + // GTSAM_CONCEPT_MANIFOLD_TYPE(V) - /* do one step of gradient descent */ - V prevValues = currentValues; - double prevError = currentError; - double alpha = lineSearch(system, currentValues, direction); - currentValues = system.advance(prevValues, alpha, direction); - currentError = system.error(currentValues); + size_t iteration = 0; - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "Initial error: " << currentError << std::endl; - - // Iterative loop - do { - if (gradientDescent == true) { - direction = system.gradient(currentValues); - } else { - prevGradient = currentGradient; - currentGradient = system.gradient(currentValues); - // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 - const double beta = std::max(0.0, - currentGradient.dot(currentGradient - prevGradient) - / prevGradient.dot(prevGradient)); - direction = currentGradient + (beta * direction); + // check if we're already close enough + double currentError = error(initial); + if (currentError <= params.errorTol) { + if (params.verbosity >= NonlinearOptimizerParams::ERROR) { + std::cout << "Exiting, as error = " << currentError << " < " + << params.errorTol << std::endl; + } + return {initial, iteration}; } - alpha = lineSearch(system, currentValues, direction); + V currentValues = initial; + VectorValues currentGradient = gradient(currentValues), prevGradient, + direction = currentGradient; - prevValues = currentValues; - prevError = currentError; - - currentValues = system.advance(prevValues, alpha, direction); - currentError = system.error(currentValues); - - // User hook: - if (params.iterationHook) - params.iterationHook(iteration, prevError, currentError); + /* do one step of gradient descent */ + V prevValues = currentValues; + double prevError = currentError; + double alpha = lineSearch(currentValues, direction); + currentValues = advance(prevValues, alpha, direction); + currentError = error(currentValues); // Maybe show output if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl; - } while (++iteration < params.maxIterations && !singleIteration - && 
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, - params.errorTol, prevError, currentError, params.verbosity)); + std::cout << "Initial error: " << currentError << std::endl; - // Printing if verbose - if (params.verbosity >= NonlinearOptimizerParams::ERROR - && iteration >= params.maxIterations) - std::cout - << "nonlinearConjugateGradient: Terminating because reached maximum iterations" - << std::endl; + // Iterative loop + do { + if (gradientDescent == true) { + direction = gradient(currentValues); + } else { + prevGradient = currentGradient; + currentGradient = gradient(currentValues); + // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 + const double beta = + std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / + prevGradient.dot(prevGradient)); + direction = currentGradient + (beta * direction); + } - return {currentValues, iteration}; -} + alpha = lineSearch(currentValues, direction); + + prevValues = currentValues; + prevError = currentError; + + currentValues = advance(prevValues, alpha, direction); + currentError = error(currentValues); + + // User hook: + if (params.iterationHook) + params.iterationHook(iteration, prevError, currentError); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "iteration: " << iteration + << ", currentError: " << currentError << std::endl; + } while (++iteration < params.maxIterations && !singleIteration && + !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, + params.errorTol, prevError, currentError, + params.verbosity)); + + // Printing if verbose + if (params.verbosity >= NonlinearOptimizerParams::ERROR && + iteration >= params.maxIterations) + std::cout << "nonlinearConjugateGradient: Terminating because reached " + "maximum iterations" + << std::endl; + + return {currentValues, iteration}; + } +}; } // \ namespace gtsam From 1de138678f800ca2ceb641d6521ae20089864f45 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 12:29:07 -0400 Subject: [PATCH 09/20] remove templates --- .../NonlinearConjugateGradientOptimizer.cpp | 2 +- .../NonlinearConjugateGradientOptimizer.h | 36 ++++++++----------- 2 files changed, 15 insertions(+), 23 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 40bd76a88..6190b041e 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -65,7 +65,7 @@ Values NonlinearConjugateGradientOptimizer::advance( } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { - const auto [newValues, dummy] = nonlinearConjugateGradient( + const auto [newValues, dummy] = nonlinearConjugateGradient( state_->values, params_, true /* single iteration */); state_.reset( new State(newValues, graph_.error(newValues), state_->iterations + 1)); diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 4a4eb22c5..28de45894 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -52,21 +52,20 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer Values advance(const Values ¤t, const double alpha, const VectorValues &g) const; - /** - * Perform a single iteration, returning GaussianFactorGraph corresponding to + /** + * Perform a single iteration, returning GaussianFactorGraph corresponding to * 
the linearized factor graph. */ GaussianFactorGraph::shared_ptr iterate() override; - /** - * Optimize for the maximum-likelihood estimate, returning a the optimized + /** + * Optimize for the maximum-likelihood estimate, returning a the optimized * variable assignments. */ - const Values& optimize() override; + const Values &optimize() override; /** Implement the golden-section line search algorithm */ - template - double lineSearch(const V currentValues, const W &gradient) { + double lineSearch(const Values ¤tValues, const VectorValues &gradient) const { /* normalize it such that it becomes a unit vector */ const double g = gradient.norm(); @@ -78,7 +77,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer double minStep = -1.0 / g, maxStep = 0, newStep = minStep + (maxStep - minStep) / (phi + 1.0); - V newValues = advance(currentValues, newStep, gradient); + Values newValues = advance(currentValues, newStep, gradient); double newError = error(newValues); while (true) { @@ -91,7 +90,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer return 0.5 * (minStep + maxStep); } - const V testValues = advance(currentValues, testStep, gradient); + const Values testValues = advance(currentValues, testStep, gradient); const double testError = error(testValues); // update the working range @@ -120,18 +119,12 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer * formula suggested in * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. * - * The V class - * denotes the state or the solution. - * * The last parameter is a switch between gradient-descent and conjugate * gradient */ - template - std::tuple nonlinearConjugateGradient( - const V &initial, const NonlinearOptimizerParams ¶ms, - const bool singleIteration, const bool gradientDescent = false) { - // GTSAM_CONCEPT_MANIFOLD_TYPE(V) - + std::tuple nonlinearConjugateGradient( + const Values &initial, const NonlinearOptimizerParams ¶ms, + const bool singleIteration, const bool gradientDescent = false) const { size_t iteration = 0; // check if we're already close enough @@ -144,12 +137,12 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer return {initial, iteration}; } - V currentValues = initial; + Values currentValues = initial; VectorValues currentGradient = gradient(currentValues), prevGradient, direction = currentGradient; /* do one step of gradient descent */ - V prevValues = currentValues; + Values prevValues = currentValues; double prevError = currentError; double alpha = lineSearch(currentValues, direction); currentValues = advance(prevValues, alpha, direction); @@ -205,5 +198,4 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer } }; -} // \ namespace gtsam - +} // namespace gtsam From 69729d603beb76e300a9ea40d9109af456b1a5f5 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 12:35:11 -0400 Subject: [PATCH 10/20] move all implementation to cpp file --- .../NonlinearConjugateGradientOptimizer.cpp | 139 +++++++++++++++++- .../NonlinearConjugateGradientOptimizer.h | 123 +--------------- 2 files changed, 134 insertions(+), 128 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 6190b041e..9fce1776b 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -16,11 +16,11 @@ * @date Jun 11, 2012 */ -#include -#include -#include #include #include +#include +#include +#include #include @@ -34,8 +34,8 @@ typedef 
internal::NonlinearOptimizerState State; * @param values a linearization point * Can be moved to NonlinearFactorGraph.h if desired */ -static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg, - const Values &values) { +static VectorValues gradientInPlace(const NonlinearFactorGraph& nfg, + const Values& values) { // Linearize graph GaussianFactorGraph::shared_ptr linear = nfg.linearize(values); return linear->gradientAtZero(); @@ -48,8 +48,7 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( new State(initialValues, graph.error(initialValues)))), params_(params) {} -double NonlinearConjugateGradientOptimizer::error( - const Values& state) const { +double NonlinearConjugateGradientOptimizer::error(const Values& state) const { return graph_.error(state); } @@ -83,5 +82,129 @@ const Values& NonlinearConjugateGradientOptimizer::optimize() { return state_->values; } -} /* namespace gtsam */ +double NonlinearConjugateGradientOptimizer::lineSearch( + const Values& currentValues, const VectorValues& gradient) const { + /* normalize it such that it becomes a unit vector */ + const double g = gradient.norm(); + // perform the golden section search algorithm to decide the the optimal + // step size detail refer to + // http://en.wikipedia.org/wiki/Golden_section_search + const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, + tau = 1e-5; + double minStep = -1.0 / g, maxStep = 0, + newStep = minStep + (maxStep - minStep) / (phi + 1.0); + + Values newValues = advance(currentValues, newStep, gradient); + double newError = error(newValues); + + while (true) { + const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; + const double testStep = flag ? newStep + resphi * (maxStep - newStep) + : newStep - resphi * (newStep - minStep); + + if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) { + return 0.5 * (minStep + maxStep); + } + + const Values testValues = advance(currentValues, testStep, gradient); + const double testError = error(testValues); + + // update the working range + if (testError >= newError) { + if (flag) + maxStep = testStep; + else + minStep = testStep; + } else { + if (flag) { + minStep = newStep; + newStep = testStep; + newError = testError; + } else { + maxStep = newStep; + newStep = testStep; + newError = testError; + } + } + } + return 0.0; +} + +std::tuple +NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient( + const Values& initial, const NonlinearOptimizerParams& params, + const bool singleIteration, const bool gradientDescent) const { + size_t iteration = 0; + + // check if we're already close enough + double currentError = error(initial); + if (currentError <= params.errorTol) { + if (params.verbosity >= NonlinearOptimizerParams::ERROR) { + std::cout << "Exiting, as error = " << currentError << " < " + << params.errorTol << std::endl; + } + return {initial, iteration}; + } + + Values currentValues = initial; + VectorValues currentGradient = gradient(currentValues), prevGradient, + direction = currentGradient; + + /* do one step of gradient descent */ + Values prevValues = currentValues; + double prevError = currentError; + double alpha = lineSearch(currentValues, direction); + currentValues = advance(prevValues, alpha, direction); + currentError = error(currentValues); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "Initial error: " << currentError << std::endl; + + // Iterative loop + do { + if (gradientDescent == true) { + 
direction = gradient(currentValues); + } else { + prevGradient = currentGradient; + currentGradient = gradient(currentValues); + // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 + const double beta = + std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / + prevGradient.dot(prevGradient)); + direction = currentGradient + (beta * direction); + } + + alpha = lineSearch(currentValues, direction); + + prevValues = currentValues; + prevError = currentError; + + currentValues = advance(prevValues, alpha, direction); + currentError = error(currentValues); + + // User hook: + if (params.iterationHook) + params.iterationHook(iteration, prevError, currentError); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "iteration: " << iteration + << ", currentError: " << currentError << std::endl; + } while (++iteration < params.maxIterations && !singleIteration && + !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, + params.errorTol, prevError, currentError, + params.verbosity)); + + // Printing if verbose + if (params.verbosity >= NonlinearOptimizerParams::ERROR && + iteration >= params.maxIterations) + std::cout << "nonlinearConjugateGradient: Terminating because reached " + "maximum iterations" + << std::endl; + + return {currentValues, iteration}; +} + +} /* namespace gtsam */ diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 28de45894..cdc0634d6 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -65,54 +65,8 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer const Values &optimize() override; /** Implement the golden-section line search algorithm */ - double lineSearch(const Values ¤tValues, const VectorValues &gradient) const { - /* normalize it such that it becomes a unit vector */ - const double g = gradient.norm(); - - // perform the golden section search algorithm to decide the the optimal - // step size detail refer to - // http://en.wikipedia.org/wiki/Golden_section_search - const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, - tau = 1e-5; - double minStep = -1.0 / g, maxStep = 0, - newStep = minStep + (maxStep - minStep) / (phi + 1.0); - - Values newValues = advance(currentValues, newStep, gradient); - double newError = error(newValues); - - while (true) { - const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; - const double testStep = flag ? 
newStep + resphi * (maxStep - newStep) - : newStep - resphi * (newStep - minStep); - - if ((maxStep - minStep) < - tau * (std::abs(testStep) + std::abs(newStep))) { - return 0.5 * (minStep + maxStep); - } - - const Values testValues = advance(currentValues, testStep, gradient); - const double testError = error(testValues); - - // update the working range - if (testError >= newError) { - if (flag) - maxStep = testStep; - else - minStep = testStep; - } else { - if (flag) { - minStep = newStep; - newStep = testStep; - newError = testError; - } else { - maxStep = newStep; - newStep = testStep; - newError = testError; - } - } - } - return 0.0; - } + double lineSearch(const Values ¤tValues, + const VectorValues &gradient) const; /** * Implement the nonlinear conjugate gradient method using the Polak-Ribiere @@ -124,78 +78,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer */ std::tuple nonlinearConjugateGradient( const Values &initial, const NonlinearOptimizerParams ¶ms, - const bool singleIteration, const bool gradientDescent = false) const { - size_t iteration = 0; - - // check if we're already close enough - double currentError = error(initial); - if (currentError <= params.errorTol) { - if (params.verbosity >= NonlinearOptimizerParams::ERROR) { - std::cout << "Exiting, as error = " << currentError << " < " - << params.errorTol << std::endl; - } - return {initial, iteration}; - } - - Values currentValues = initial; - VectorValues currentGradient = gradient(currentValues), prevGradient, - direction = currentGradient; - - /* do one step of gradient descent */ - Values prevValues = currentValues; - double prevError = currentError; - double alpha = lineSearch(currentValues, direction); - currentValues = advance(prevValues, alpha, direction); - currentError = error(currentValues); - - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "Initial error: " << currentError << std::endl; - - // Iterative loop - do { - if (gradientDescent == true) { - direction = gradient(currentValues); - } else { - prevGradient = currentGradient; - currentGradient = gradient(currentValues); - // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 - const double beta = - std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / - prevGradient.dot(prevGradient)); - direction = currentGradient + (beta * direction); - } - - alpha = lineSearch(currentValues, direction); - - prevValues = currentValues; - prevError = currentError; - - currentValues = advance(prevValues, alpha, direction); - currentError = error(currentValues); - - // User hook: - if (params.iterationHook) - params.iterationHook(iteration, prevError, currentError); - - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "iteration: " << iteration - << ", currentError: " << currentError << std::endl; - } while (++iteration < params.maxIterations && !singleIteration && - !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, - params.errorTol, prevError, currentError, - params.verbosity)); - - // Printing if verbose - if (params.verbosity >= NonlinearOptimizerParams::ERROR && - iteration >= params.maxIterations) - std::cout << "nonlinearConjugateGradient: Terminating because reached " - "maximum iterations" - << std::endl; - - return {currentValues, iteration}; - } + const bool singleIteration, const bool gradientDescent = false) const; }; } // namespace gtsam From df1c008955cd90f7fe2ec79e5e71d4fc78072e6d Mon Sep 17 00:00:00 2001 From: Varun 
Agrawal Date: Tue, 15 Oct 2024 23:31:29 -0400 Subject: [PATCH 11/20] make ConjugateGradientParameters a public struct --- gtsam/linear/ConjugateGradientSolver.cpp | 14 ++--- gtsam/linear/ConjugateGradientSolver.h | 76 ++++++++++++++---------- gtsam/linear/iterative-inl.h | 17 +++--- 3 files changed, 61 insertions(+), 46 deletions(-) diff --git a/gtsam/linear/ConjugateGradientSolver.cpp b/gtsam/linear/ConjugateGradientSolver.cpp index 006e7d67d..b2725ea1e 100644 --- a/gtsam/linear/ConjugateGradientSolver.cpp +++ b/gtsam/linear/ConjugateGradientSolver.cpp @@ -26,13 +26,13 @@ namespace gtsam { /*****************************************************************************/ void ConjugateGradientParameters::print(ostream &os) const { - Base::print(os); - cout << "ConjugateGradientParameters" << endl - << "minIter: " << minIterations_ << endl - << "maxIter: " << maxIterations_ << endl - << "resetIter: " << reset_ << endl - << "eps_rel: " << epsilon_rel_ << endl - << "eps_abs: " << epsilon_abs_ << endl; + Base::print(os); + cout << "ConjugateGradientParameters" << endl + << "minIter: " << minIterations << endl + << "maxIter: " << maxIterations << endl + << "resetIter: " << reset << endl + << "eps_rel: " << epsilon_rel << endl + << "eps_abs: " << epsilon_abs << endl; } /*****************************************************************************/ diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h index ccdab45da..ab804e3f4 100644 --- a/gtsam/linear/ConjugateGradientSolver.h +++ b/gtsam/linear/ConjugateGradientSolver.h @@ -26,38 +26,50 @@ namespace gtsam { /** * Parameters for the Conjugate Gradient method */ -class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters { - - public: +struct GTSAM_EXPORT ConjugateGradientParameters + : public IterativeOptimizationParameters { typedef IterativeOptimizationParameters Base; typedef std::shared_ptr shared_ptr; - protected: - size_t minIterations_; ///< minimum number of cg iterations - size_t maxIterations_; ///< maximum number of cg iterations - size_t reset_; ///< number of iterations before reset - double epsilon_rel_; ///< threshold for relative error decrease - double epsilon_abs_; ///< threshold for absolute error decrease + size_t minIterations; ///< minimum number of cg iterations + size_t maxIterations; ///< maximum number of cg iterations + size_t reset; ///< number of iterations before reset + double epsilon_rel; ///< threshold for relative error decrease + double epsilon_abs; ///< threshold for absolute error decrease - public: /* Matrix Operation Kernel */ enum BLASKernel { - GTSAM = 0, ///< Jacobian Factor Graph of GTSAM - } blas_kernel_ ; + GTSAM = 0, ///< Jacobian Factor Graph of GTSAM + } blas_kernel; ConjugateGradientParameters() - : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3), - epsilon_abs_(1e-3), blas_kernel_(GTSAM) {} + : minIterations(1), + maxIterations(500), + reset(501), + epsilon_rel(1e-3), + epsilon_abs(1e-3), + blas_kernel(GTSAM) {} - ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset, - double epsilon_rel, double epsilon_abs, BLASKernel blas) - : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset), - epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {} + ConjugateGradientParameters(size_t minIterations, size_t maxIterations, + size_t reset, double epsilon_rel, + double epsilon_abs, BLASKernel blas) + : minIterations(minIterations), + 
maxIterations(maxIterations), + reset(reset), + epsilon_rel(epsilon_rel), + epsilon_abs(epsilon_abs), + blas_kernel(blas) {} ConjugateGradientParameters(const ConjugateGradientParameters &p) - : Base(p), minIterations_(p.minIterations_), maxIterations_(p.maxIterations_), reset_(p.reset_), - epsilon_rel_(p.epsilon_rel_), epsilon_abs_(p.epsilon_abs_), blas_kernel_(GTSAM) {} + : Base(p), + minIterations(p.minIterations), + maxIterations(p.maxIterations), + reset(p.reset), + epsilon_rel(p.epsilon_rel), + epsilon_abs(p.epsilon_abs), + blas_kernel(GTSAM) {} +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43 /* general interface */ inline size_t minIterations() const { return minIterations_; } inline size_t maxIterations() const { return maxIterations_; } @@ -79,6 +91,7 @@ class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationPar inline void setEpsilon(double value) { epsilon_rel_ = value; } inline void setEpsilon_rel(double value) { epsilon_rel_ = value; } inline void setEpsilon_abs(double value) { epsilon_abs_ = value; } +#endif void print() const { Base::print(); } @@ -111,18 +124,19 @@ V preconditionedConjugateGradient(const S &system, const V &initial, double currentGamma = system.dot(residual, residual), prevGamma, alpha, beta; - const size_t iMaxIterations = parameters.maxIterations(), - iMinIterations = parameters.minIterations(), - iReset = parameters.reset() ; - const double threshold = std::max(parameters.epsilon_abs(), - parameters.epsilon() * parameters.epsilon() * currentGamma); + const size_t iMaxIterations = parameters.maxIterations, + iMinIterations = parameters.minIterations, + iReset = parameters.reset; + const double threshold = + std::max(parameters.epsilon_abs, + parameters.epsilon_rel * parameters.epsilon_rel * currentGamma); - if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY ) - std::cout << "[PCG] epsilon = " << parameters.epsilon() - << ", max = " << parameters.maxIterations() - << ", reset = " << parameters.reset() - << ", ||r0||^2 = " << currentGamma - << ", threshold = " << threshold << std::endl; + if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY) + std::cout << "[PCG] epsilon = " << parameters.epsilon_rel + << ", max = " << parameters.maxIterations + << ", reset = " << parameters.reset + << ", ||r0||^2 = " << currentGamma + << ", threshold = " << threshold << std::endl; size_t k; for ( k = 1 ; k <= iMaxIterations && (currentGamma > threshold || k <= iMinIterations) ; k++ ) { diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h index c96d643b2..a8185b544 100644 --- a/gtsam/linear/iterative-inl.h +++ b/gtsam/linear/iterative-inl.h @@ -49,10 +49,12 @@ namespace gtsam { // init gamma and calculate threshold gamma = dot(g,g); - threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma); + threshold = + std::max(parameters_.epsilon_abs, + parameters_.epsilon_rel * parameters_.epsilon_rel * gamma); // Allocate and calculate A*d for first iteration - if (gamma > parameters_.epsilon_abs()) Ad = Ab * d; + if (gamma > parameters_.epsilon_abs) Ad = Ab * d; } /* ************************************************************************* */ @@ -79,13 +81,13 @@ namespace gtsam { // take a step, return true if converged bool step(const S& Ab, V& x) { - if ((++k) >= ((int)parameters_.maxIterations())) return true; + if ((++k) >= ((int)parameters_.maxIterations)) return true; //----------------------------------> double alpha = takeOptimalStep(x); // update gradient 
(or re-calculate at reset time) - if (k % parameters_.reset() == 0) g = Ab.gradient(x); + if (k % parameters_.reset == 0) g = Ab.gradient(x); // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad) else Ab.transposeMultiplyAdd(alpha, Ad, g); @@ -126,11 +128,10 @@ namespace gtsam { CGState state(Ab, x, parameters, steepest); if (parameters.verbosity() != ConjugateGradientParameters::SILENT) - std::cout << "CG: epsilon = " << parameters.epsilon() - << ", maxIterations = " << parameters.maxIterations() + std::cout << "CG: epsilon = " << parameters.epsilon_rel + << ", maxIterations = " << parameters.maxIterations << ", ||g0||^2 = " << state.gamma - << ", threshold = " << state.threshold - << std::endl; + << ", threshold = " << state.threshold << std::endl; if ( state.gamma < state.threshold ) { if (parameters.verbosity() != ConjugateGradientParameters::SILENT) From 49a3b2e8a22e9bbe6b848bd0e885647c5d0e110a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:43:41 -0400 Subject: [PATCH 12/20] make PCGSolverParameters a public struct --- gtsam/linear/PCGSolver.cpp | 8 ++------ gtsam/linear/PCGSolver.h | 17 +++-------------- gtsam/linear/linear.i | 20 ++++++++------------ 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index 320c3e535..c0510961f 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -34,17 +34,13 @@ namespace gtsam { void PCGSolverParameters::print(ostream &os) const { Base::print(os); os << "PCGSolverParameters:" << endl; - preconditioner_->print(os); + preconditioner->print(os); } /*****************************************************************************/ PCGSolver::PCGSolver(const PCGSolverParameters &p) { parameters_ = p; - preconditioner_ = createPreconditioner(p.preconditioner()); -} - -void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr preconditioner) { - preconditioner_ = preconditioner; + preconditioner_ = createPreconditioner(p.preconditioner); } void PCGSolverParameters::print(const std::string &s) const { diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h index cb671f288..17cc2d3db 100644 --- a/gtsam/linear/PCGSolver.h +++ b/gtsam/linear/PCGSolver.h @@ -33,30 +33,19 @@ struct PreconditionerParameters; /** * Parameters for Preconditioned Conjugate Gradient solver. 
*/ -struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters { - public: +struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters { typedef ConjugateGradientParameters Base; typedef std::shared_ptr shared_ptr; -protected: - std::shared_ptr preconditioner_; + std::shared_ptr preconditioner; -public: PCGSolverParameters() {} PCGSolverParameters( const std::shared_ptr &preconditioner) - : preconditioner_(preconditioner) {} + : preconditioner(preconditioner) {} void print(std::ostream &os) const override; - - const std::shared_ptr preconditioner() const { - return preconditioner_; - } - - void setPreconditionerParams( - const std::shared_ptr preconditioner); - void print(const std::string &s) const; }; diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index af6c2ee22..eecc0192c 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -710,17 +710,11 @@ virtual class IterativeOptimizationParameters { #include virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters { ConjugateGradientParameters(); - int getMinIterations() const ; - int getMaxIterations() const ; - int getReset() const; - double getEpsilon_rel() const; - double getEpsilon_abs() const; - - void setMinIterations(int value); - void setMaxIterations(int value); - void setReset(int value); - void setEpsilon_rel(double value); - void setEpsilon_abs(double value); + int minIterations; + int maxIterations; + int reset; + double epsilon_rel; + double epsilon_abs; }; #include @@ -739,8 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParamet #include virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters { PCGSolverParameters(); + PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner); void print(string s = ""); - void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner); + + gtsam::PreconditionerParameters* preconditioner; }; #include From 94a95b69cba31dc6d8317cca370da365a44d55aa Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:43:52 -0400 Subject: [PATCH 13/20] update tests --- python/gtsam/tests/test_NonlinearOptimizer.py | 2 +- tests/testIterative.cpp | 12 ++++---- tests/testPreconditioner.cpp | 28 +++++++++---------- timing/timeShonanFactor.cpp | 2 +- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py index 1b4209509..2b276425f 100644 --- a/python/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -69,7 +69,7 @@ class TestScenario(GtsamTestCase): lmParams = LevenbergMarquardtParams.CeresDefaults() lmParams.setLinearSolverType("ITERATIVE") cgParams = PCGSolverParameters() - cgParams.setPreconditionerParams(DummyPreconditionerParameters()) + cgParams.preconditioner = DummyPreconditionerParameters() lmParams.setIterativeParams(cgParams) actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize() self.assertAlmostEqual(0, self.fg.error(actual)) diff --git a/tests/testIterative.cpp b/tests/testIterative.cpp index 6d9918b76..4df598f80 100644 --- a/tests/testIterative.cpp +++ b/tests/testIterative.cpp @@ -95,9 +95,9 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint ) VectorValues zeros = config.zeroVectors(); ConjugateGradientParameters parameters; - parameters.setEpsilon_abs(1e-3); - parameters.setEpsilon_rel(1e-5); - parameters.setMaxIterations(100); + 
parameters.epsilon_abs = 1e-3; + parameters.epsilon_rel = 1e-5; + parameters.maxIterations = 100; VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters); VectorValues expected; @@ -122,9 +122,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint ) VectorValues zeros = config.zeroVectors(); ConjugateGradientParameters parameters; - parameters.setEpsilon_abs(1e-3); - parameters.setEpsilon_rel(1e-5); - parameters.setMaxIterations(100); + parameters.epsilon_abs = 1e-3; + parameters.epsilon_rel = 1e-5; + parameters.maxIterations = 100; VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters); VectorValues expected; diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp index 9eff3e7f0..2cea40c3d 100644 --- a/tests/testPreconditioner.cpp +++ b/tests/testPreconditioner.cpp @@ -54,21 +54,21 @@ TEST( PCGsolver, verySimpleLinearSystem) { // Solve the system using Preconditioned Conjugate Gradient solver // Common PCG parameters gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared(); - pcg->setMaxIterations(500); - pcg->setEpsilon_abs(0.0); - pcg->setEpsilon_rel(0.0); + pcg->maxIterations = 500; + pcg->epsilon_abs = 0.0; + pcg->epsilon_rel = 0.0; //pcg->setVerbosity("ERROR"); // With Dummy preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7)); //deltaPCGDummy.print("PCG Dummy"); // With Block-Jacobi preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); // It takes more than 1000 iterations for this test pcg->setMaxIterations(1500); VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG); @@ -107,21 +107,21 @@ TEST(PCGSolver, simpleLinearSystem) { // Solve the system using Preconditioned Conjugate Gradient solver // Common PCG parameters gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared(); - pcg->setMaxIterations(500); - pcg->setEpsilon_abs(0.0); - pcg->setEpsilon_rel(0.0); + pcg->maxIterations = 500; + pcg->epsilon_abs = 0.0; + pcg->epsilon_rel = 0.0; //pcg->setVerbosity("ERROR"); // With Dummy preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5)); //deltaPCGDummy.print("PCG Dummy"); // With Block-Jacobi preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5)); //deltaPCGJacobi.print("PCG Jacobi"); diff --git a/timing/timeShonanFactor.cpp b/timing/timeShonanFactor.cpp index fc97d01a8..3cdb596cd 100644 --- a/timing/timeShonanFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -83,7 +83,7 @@ int main(int argc, char* argv[]) { // params.setVerbosityLM("SUMMARY"); // params.linearSolverType = LevenbergMarquardtParams::Iterative; // auto pcg = std::make_shared(); - // pcg->preconditioner_ = + // pcg->preconditioner = // std::make_shared(); // std::make_shared(); // params.iterativeParams = pcg; From d2ca1ef285230a7263e4966a94aedf700bf9e9ac Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:43:56 -0400 Subject: [PATCH 14/20] updat example --- examples/SFMExample_SmartFactorPCG.cpp | 7 +++---- 1 
file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/SFMExample_SmartFactorPCG.cpp b/examples/SFMExample_SmartFactorPCG.cpp index a913d32ad..b64fcd048 100644 --- a/examples/SFMExample_SmartFactorPCG.cpp +++ b/examples/SFMExample_SmartFactorPCG.cpp @@ -94,11 +94,10 @@ int main(int argc, char* argv[]) { parameters.maxIterations = 500; PCGSolverParameters::shared_ptr pcg = std::make_shared(); - pcg->preconditioner_ = - std::make_shared(); + pcg->preconditioner = std::make_shared(); // Following is crucial: - pcg->setEpsilon_abs(1e-10); - pcg->setEpsilon_rel(1e-10); + pcg->epsilon_abs = 1e-10; + pcg->epsilon_rel = 1e-10; parameters.iterativeParams = pcg; LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters); From 5c0171b69cfb9d3884328d1d6f17368d6c2acc8c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:56:43 -0400 Subject: [PATCH 15/20] re-add typedefs --- .../NonlinearConjugateGradientOptimizer.cpp | 18 +++++++++++------- .../NonlinearConjugateGradientOptimizer.h | 14 ++++++++------ 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 403c72908..211acc78d 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -49,19 +49,23 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( params_(params) {} double NonlinearConjugateGradientOptimizer::System::error( - const Values& state) const { + const State& state) const { return graph_.error(state); } -VectorValues NonlinearConjugateGradientOptimizer::System::gradient( - const Values& state) const { +NonlinearConjugateGradientOptimizer::System::Gradient +NonlinearConjugateGradientOptimizer::System::gradient( + const State& state) const { return gradientInPlace(graph_, state); } -Values NonlinearConjugateGradientOptimizer::System::advance( - const Values& current, const double alpha, - const VectorValues& gradient) const { - return current.retract(alpha * gradient); +NonlinearConjugateGradientOptimizer::System::State +NonlinearConjugateGradientOptimizer::System::advance(const State& current, + const double alpha, + const Gradient& g) const { + Gradient step = g; + step *= alpha; + return current.retract(step); } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 26f596b06..f662403dc 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -29,6 +29,8 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz /* a class for the nonlinearConjugateGradient template */ class System { public: + typedef Values State; + typedef VectorValues Gradient; typedef NonlinearOptimizerParams Parameters; protected: @@ -38,10 +40,10 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz System(const NonlinearFactorGraph &graph) : graph_(graph) { } - double error(const Values &state) const; - VectorValues gradient(const Values &state) const; - Values advance(const Values ¤t, const double alpha, - const VectorValues &g) const; + double error(const State &state) const; + Gradient gradient(const State &state) const; + State advance(const State ¤t, const double alpha, + const Gradient &g) const; }; public: @@ 
-162,8 +164,8 @@ std::tuple nonlinearConjugateGradient(const S &system, } V currentValues = initial; - VectorValues currentGradient = system.gradient(currentValues), prevGradient, - direction = currentGradient; + typename S::Gradient currentGradient = system.gradient(currentValues), + prevGradient, direction = currentGradient; /* do one step of gradient descent */ V prevValues = currentValues; From 9931e29108d6b50d2382696c55b85216759f8c3c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:56:52 -0400 Subject: [PATCH 16/20] fix tests --- tests/testPreconditioner.cpp | 2 +- tests/testSubgraphSolver.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp index 2cea40c3d..967584b7d 100644 --- a/tests/testPreconditioner.cpp +++ b/tests/testPreconditioner.cpp @@ -70,7 +70,7 @@ TEST( PCGsolver, verySimpleLinearSystem) { pcg->preconditioner = std::make_shared(); // It takes more than 1000 iterations for this test - pcg->setMaxIterations(1500); + pcg->maxIterations = 1500; VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5)); diff --git a/tests/testSubgraphSolver.cpp b/tests/testSubgraphSolver.cpp index 026f3c919..e31ce23b5 100644 --- a/tests/testSubgraphSolver.cpp +++ b/tests/testSubgraphSolver.cpp @@ -48,7 +48,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) { TEST( SubgraphSolver, Parameters ) { LONGS_EQUAL(SubgraphSolverParameters::SILENT, kParameters.verbosity()); - LONGS_EQUAL(500, kParameters.maxIterations()); + LONGS_EQUAL(500, kParameters.maxIterations); } /* ************************************************************************* */ From 38b5dd5eb04aee5a4a90108b1a482b24ae073754 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 09:17:22 -0400 Subject: [PATCH 17/20] update deprecated methods --- gtsam/linear/ConjugateGradientSolver.h | 36 +++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h index ab804e3f4..9cdb41382 100644 --- a/gtsam/linear/ConjugateGradientSolver.h +++ b/gtsam/linear/ConjugateGradientSolver.h @@ -71,26 +71,26 @@ struct GTSAM_EXPORT ConjugateGradientParameters #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43 /* general interface */ - inline size_t minIterations() const { return minIterations_; } - inline size_t maxIterations() const { return maxIterations_; } - inline size_t reset() const { return reset_; } - inline double epsilon() const { return epsilon_rel_; } - inline double epsilon_rel() const { return epsilon_rel_; } - inline double epsilon_abs() const { return epsilon_abs_; } + inline size_t minIterations() const { return minIterations; } + inline size_t maxIterations() const { return maxIterations; } + inline size_t reset() const { return reset; } + inline double epsilon() const { return epsilon_rel; } + inline double epsilon_rel() const { return epsilon_rel; } + inline double epsilon_abs() const { return epsilon_abs; } - inline size_t getMinIterations() const { return minIterations_; } - inline size_t getMaxIterations() const { return maxIterations_; } - inline size_t getReset() const { return reset_; } - inline double getEpsilon() const { return epsilon_rel_; } - inline double getEpsilon_rel() const { return epsilon_rel_; } - inline double getEpsilon_abs() const { return epsilon_abs_; } + inline size_t getMinIterations() const { return 
minIterations; } + inline size_t getMaxIterations() const { return maxIterations; } + inline size_t getReset() const { return reset; } + inline double getEpsilon() const { return epsilon_rel; } + inline double getEpsilon_rel() const { return epsilon_rel; } + inline double getEpsilon_abs() const { return epsilon_abs; } - inline void setMinIterations(size_t value) { minIterations_ = value; } - inline void setMaxIterations(size_t value) { maxIterations_ = value; } - inline void setReset(size_t value) { reset_ = value; } - inline void setEpsilon(double value) { epsilon_rel_ = value; } - inline void setEpsilon_rel(double value) { epsilon_rel_ = value; } - inline void setEpsilon_abs(double value) { epsilon_abs_ = value; } + inline void setMinIterations(size_t value) { minIterations = value; } + inline void setMaxIterations(size_t value) { maxIterations = value; } + inline void setReset(size_t value) { reset = value; } + inline void setEpsilon(double value) { epsilon_rel = value; } + inline void setEpsilon_rel(double value) { epsilon_rel = value; } + inline void setEpsilon_abs(double value) { epsilon_abs = value; } #endif From 7dd6d977fe5695aeb3e0dc7aa72b31b12013ba78 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 09:44:47 -0400 Subject: [PATCH 18/20] undo template removal --- .../NonlinearConjugateGradientOptimizer.cpp | 139 +-------------- .../NonlinearConjugateGradientOptimizer.h | 162 +++++++++++++++++- 2 files changed, 163 insertions(+), 138 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 9fce1776b..dc44deeb7 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -48,24 +48,25 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( new State(initialValues, graph.error(initialValues)))), params_(params) {} -double NonlinearConjugateGradientOptimizer::error(const Values& state) const { +double NonlinearConjugateGradientOptimizer::System::error( + const Values& state) const { return graph_.error(state); } -VectorValues NonlinearConjugateGradientOptimizer::gradient( +VectorValues NonlinearConjugateGradientOptimizer::System::gradient( const Values& state) const { return gradientInPlace(graph_, state); } -Values NonlinearConjugateGradientOptimizer::advance( +Values NonlinearConjugateGradientOptimizer::System::advance( const Values& current, const double alpha, const VectorValues& gradient) const { return current.retract(alpha * gradient); } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { - const auto [newValues, dummy] = nonlinearConjugateGradient( - state_->values, params_, true /* single iteration */); + const auto [newValues, dummy] = nonlinearConjugateGradient( + System(graph_), state_->values, params_, true /* single iteration */); state_.reset( new State(newValues, graph_.error(newValues), state_->iterations + 1)); @@ -75,136 +76,12 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { const Values& NonlinearConjugateGradientOptimizer::optimize() { // Optimize until convergence + System system(graph_); const auto [newValues, iterations] = - nonlinearConjugateGradient(state_->values, params_, false); + nonlinearConjugateGradient(system, state_->values, params_, false); state_.reset( new State(std::move(newValues), graph_.error(newValues), iterations)); return state_->values; } -double 
NonlinearConjugateGradientOptimizer::lineSearch( - const Values& currentValues, const VectorValues& gradient) const { - /* normalize it such that it becomes a unit vector */ - const double g = gradient.norm(); - - // perform the golden section search algorithm to decide the the optimal - // step size detail refer to - // http://en.wikipedia.org/wiki/Golden_section_search - const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, - tau = 1e-5; - double minStep = -1.0 / g, maxStep = 0, - newStep = minStep + (maxStep - minStep) / (phi + 1.0); - - Values newValues = advance(currentValues, newStep, gradient); - double newError = error(newValues); - - while (true) { - const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; - const double testStep = flag ? newStep + resphi * (maxStep - newStep) - : newStep - resphi * (newStep - minStep); - - if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) { - return 0.5 * (minStep + maxStep); - } - - const Values testValues = advance(currentValues, testStep, gradient); - const double testError = error(testValues); - - // update the working range - if (testError >= newError) { - if (flag) - maxStep = testStep; - else - minStep = testStep; - } else { - if (flag) { - minStep = newStep; - newStep = testStep; - newError = testError; - } else { - maxStep = newStep; - newStep = testStep; - newError = testError; - } - } - } - return 0.0; -} - -std::tuple -NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient( - const Values& initial, const NonlinearOptimizerParams& params, - const bool singleIteration, const bool gradientDescent) const { - size_t iteration = 0; - - // check if we're already close enough - double currentError = error(initial); - if (currentError <= params.errorTol) { - if (params.verbosity >= NonlinearOptimizerParams::ERROR) { - std::cout << "Exiting, as error = " << currentError << " < " - << params.errorTol << std::endl; - } - return {initial, iteration}; - } - - Values currentValues = initial; - VectorValues currentGradient = gradient(currentValues), prevGradient, - direction = currentGradient; - - /* do one step of gradient descent */ - Values prevValues = currentValues; - double prevError = currentError; - double alpha = lineSearch(currentValues, direction); - currentValues = advance(prevValues, alpha, direction); - currentError = error(currentValues); - - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "Initial error: " << currentError << std::endl; - - // Iterative loop - do { - if (gradientDescent == true) { - direction = gradient(currentValues); - } else { - prevGradient = currentGradient; - currentGradient = gradient(currentValues); - // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 - const double beta = - std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / - prevGradient.dot(prevGradient)); - direction = currentGradient + (beta * direction); - } - - alpha = lineSearch(currentValues, direction); - - prevValues = currentValues; - prevError = currentError; - - currentValues = advance(prevValues, alpha, direction); - currentError = error(currentValues); - - // User hook: - if (params.iterationHook) - params.iterationHook(iteration, prevError, currentError); - - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "iteration: " << iteration - << ", currentError: " << currentError << std::endl; - } while (++iteration < params.maxIterations && !singleIteration && - 
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, - params.errorTol, prevError, currentError, - params.verbosity)); - - // Printing if verbose - if (params.verbosity >= NonlinearOptimizerParams::ERROR && - iteration >= params.maxIterations) - std::cout << "nonlinearConjugateGradient: Terminating because reached " - "maximum iterations" - << std::endl; - - return {currentValues, iteration}; -} - } /* namespace gtsam */ diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index cdc0634d6..2c4628c29 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -26,6 +26,22 @@ namespace gtsam { /** An implementation of the nonlinear CG method using the template below */ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer { + /* a class for the nonlinearConjugateGradient template */ + class System { + public: + typedef NonlinearOptimizerParams Parameters; + + protected: + const NonlinearFactorGraph &graph_; + + public: + System(const NonlinearFactorGraph &graph) : graph_(graph) {} + double error(const Values &state) const; + VectorValues gradient(const Values &state) const; + Values advance(const Values ¤t, const double alpha, + const VectorValues &g) const; + }; + public: typedef NonlinearOptimizer Base; typedef NonlinearOptimizerParams Parameters; @@ -45,13 +61,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer /// Destructor ~NonlinearConjugateGradientOptimizer() override {} - double error(const Values &state) const; - - VectorValues gradient(const Values &state) const; - - Values advance(const Values ¤t, const double alpha, - const VectorValues &g) const; - /** * Perform a single iteration, returning GaussianFactorGraph corresponding to * the linearized factor graph. @@ -81,4 +90,143 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer const bool singleIteration, const bool gradientDescent = false) const; }; +/** Implement the golden-section line search algorithm */ +template +double lineSearch(const S &system, const V currentValues, const W &gradient) { + /* normalize it such that it becomes a unit vector */ + const double g = gradient.norm(); + + // perform the golden section search algorithm to decide the the optimal step + // size detail refer to http://en.wikipedia.org/wiki/Golden_section_search + const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, + tau = 1e-5; + double minStep = -1.0 / g, maxStep = 0, + newStep = minStep + (maxStep - minStep) / (phi + 1.0); + + V newValues = system.advance(currentValues, newStep, gradient); + double newError = system.error(newValues); + + while (true) { + const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; + const double testStep = flag ? 
newStep + resphi * (maxStep - newStep) + : newStep - resphi * (newStep - minStep); + + if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) { + return 0.5 * (minStep + maxStep); + } + + const V testValues = system.advance(currentValues, testStep, gradient); + const double testError = system.error(testValues); + + // update the working range + if (testError >= newError) { + if (flag) + maxStep = testStep; + else + minStep = testStep; + } else { + if (flag) { + minStep = newStep; + newStep = testStep; + newError = testError; + } else { + maxStep = newStep; + newStep = testStep; + newError = testError; + } + } + } + return 0.0; +} + +/** + * Implement the nonlinear conjugate gradient method using the Polak-Ribiere + * formula suggested in + * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. + * + * The S (system) class requires three member functions: error(state), + * gradient(state) and advance(state, step-size, direction). The V class denotes + * the state or the solution. + * + * The last parameter is a switch between gradient-descent and conjugate + * gradient + */ +template +std::tuple nonlinearConjugateGradient( + const S &system, const V &initial, const NonlinearOptimizerParams ¶ms, + const bool singleIteration, const bool gradientDescent = false) { + // GTSAM_CONCEPT_MANIFOLD_TYPE(V) + + size_t iteration = 0; + + // check if we're already close enough + double currentError = system.error(initial); + if (currentError <= params.errorTol) { + if (params.verbosity >= NonlinearOptimizerParams::ERROR) { + std::cout << "Exiting, as error = " << currentError << " < " + << params.errorTol << std::endl; + } + return {initial, iteration}; + } + + V currentValues = initial; + VectorValues currentGradient = system.gradient(currentValues), prevGradient, + direction = currentGradient; + + /* do one step of gradient descent */ + V prevValues = currentValues; + double prevError = currentError; + double alpha = lineSearch(system, currentValues, direction); + currentValues = system.advance(prevValues, alpha, direction); + currentError = system.error(currentValues); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "Initial error: " << currentError << std::endl; + + // Iterative loop + do { + if (gradientDescent == true) { + direction = system.gradient(currentValues); + } else { + prevGradient = currentGradient; + currentGradient = system.gradient(currentValues); + // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 + const double beta = + std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / + prevGradient.dot(prevGradient)); + direction = currentGradient + (beta * direction); + } + + alpha = lineSearch(system, currentValues, direction); + + prevValues = currentValues; + prevError = currentError; + + currentValues = system.advance(prevValues, alpha, direction); + currentError = system.error(currentValues); + + // User hook: + if (params.iterationHook) + params.iterationHook(iteration, prevError, currentError); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "iteration: " << iteration + << ", currentError: " << currentError << std::endl; + } while (++iteration < params.maxIterations && !singleIteration && + !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, + params.errorTol, prevError, currentError, + params.verbosity)); + + // Printing if verbose + if (params.verbosity >= NonlinearOptimizerParams::ERROR && + iteration >= 
params.maxIterations) + std::cout << "nonlinearConjugateGradient: Terminating because reached " + "maximum iterations" + << std::endl; + + return {currentValues, iteration}; +} + } // namespace gtsam From f5f7590e78e13d218759b21a64459972d0c7c728 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 09:47:23 -0400 Subject: [PATCH 19/20] kill methods with same name in favor of using variables directly --- gtsam/linear/ConjugateGradientSolver.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h index 9cdb41382..46d49ca4f 100644 --- a/gtsam/linear/ConjugateGradientSolver.h +++ b/gtsam/linear/ConjugateGradientSolver.h @@ -70,14 +70,6 @@ struct GTSAM_EXPORT ConjugateGradientParameters blas_kernel(GTSAM) {} #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43 - /* general interface */ - inline size_t minIterations() const { return minIterations; } - inline size_t maxIterations() const { return maxIterations; } - inline size_t reset() const { return reset; } - inline double epsilon() const { return epsilon_rel; } - inline double epsilon_rel() const { return epsilon_rel; } - inline double epsilon_abs() const { return epsilon_abs; } - inline size_t getMinIterations() const { return minIterations; } inline size_t getMaxIterations() const { return maxIterations; } inline size_t getReset() const { return reset; } From d883e16cd1c6fe770d81ee188349a946c0254bc3 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 10:20:45 -0400 Subject: [PATCH 20/20] remove extra methods --- .../NonlinearConjugateGradientOptimizer.h | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 1858fbc2e..369291fae 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -74,22 +74,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer * variable assignments. */ const Values &optimize() override; - - /** Implement the golden-section line search algorithm */ - double lineSearch(const Values ¤tValues, - const VectorValues &gradient) const; - - /** - * Implement the nonlinear conjugate gradient method using the Polak-Ribiere - * formula suggested in - * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. - * - * The last parameter is a switch between gradient-descent and conjugate - * gradient - */ - std::tuple nonlinearConjugateGradient( - const Values &initial, const NonlinearOptimizerParams ¶ms, - const bool singleIteration, const bool gradientDescent = false) const; }; /** Implement the golden-section line search algorithm */
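
Taken together, patches 12-14 replace the getter/setter interface on ConjugateGradientParameters and PCGSolverParameters with directly assignable public fields plus a public `preconditioner` member and a preconditioner-taking constructor. Below is a minimal sketch of how the post-refactor API would be driven from user code, assuming the types and fields shown in the diffs above and the usual gtsam header paths; the helper name `solveWithPCG` and the `graph`/`initial` arguments are placeholders for illustration only, not part of the patch series.

#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/Preconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

// Sketch: run Levenberg-Marquardt with an iterative (PCG) linear solver using
// the field-based parameter API introduced in this patch series. `graph` and
// `initial` are assumed to form a well-posed problem; `solveWithPCG` is a
// hypothetical helper name.
Values solveWithPCG(const NonlinearFactorGraph& graph, const Values& initial) {
  // The preconditioner is now passed directly to the constructor (or assigned
  // to the public `preconditioner` member) instead of via a setter.
  auto pcg = std::make_shared<PCGSolverParameters>(
      std::make_shared<BlockJacobiPreconditionerParameters>());

  // Convergence controls are plain public fields inherited from
  // ConjugateGradientParameters, replacing the removed set*/get* methods.
  pcg->maxIterations = 500;
  pcg->epsilon_rel = 1e-10;
  pcg->epsilon_abs = 1e-10;

  LevenbergMarquardtParams params;
  params.linearSolverType = LevenbergMarquardtParams::Iterative;
  params.iterativeParams = pcg;

  return LevenbergMarquardtOptimizer(graph, initial, params).optimize();
}

The same parameter object can also drive the linear solver directly on a GaussianFactorGraph, e.g. PCGSolver(*pcg).optimize(gfg), which is the pattern exercised by the updated testPreconditioner.cpp hunks above.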