diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index 06b2aa178..3a42b8842 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -19,7 +19,9 @@ namespace gtsam {
 * parameters for the conjugate gradient method
 */
-struct ConjugateGradientParameters : public IterativeOptimizationParameters {
+class ConjugateGradientParameters : public IterativeOptimizationParameters {
+
+public:
 
  typedef IterativeOptimizationParameters Base;
  typedef boost::shared_ptr shared_ptr;
diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp
index c91365f19..891585ee5 100644
--- a/gtsam/linear/SubgraphSolver.cpp
+++ b/gtsam/linear/SubgraphSolver.cpp
@@ -13,14 +13,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
-
 #include
 #include
diff --git a/gtsam/linear/SubgraphSolver.h b/gtsam/linear/SubgraphSolver.h
index ec9a5f19a..debb373f2 100644
--- a/gtsam/linear/SubgraphSolver.h
+++ b/gtsam/linear/SubgraphSolver.h
@@ -19,7 +19,8 @@ namespace gtsam {
 
-struct SubgraphSolverParameters : public ConjugateGradientParameters {
+class SubgraphSolverParameters : public ConjugateGradientParameters {
+public:
  typedef ConjugateGradientParameters Base;
  SubgraphSolverParameters() : Base() {}
 };
diff --git a/gtsam/linear/VectorValues.h b/gtsam/linear/VectorValues.h
index f3ef57418..3531989c9 100644
--- a/gtsam/linear/VectorValues.h
+++ b/gtsam/linear/VectorValues.h
@@ -262,6 +262,11 @@ namespace gtsam {
      return gtsam::dot(this->values_, V.values_);
    }
 
+    /** Vector L2 norm */
+    inline double norm() const {
+      return this->vector().norm();
+    }
+
    /**
     * + operator does element-wise addition. Both VectorValues must have the
     * same structure (checked when NDEBUG is not defined).
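The VectorValues::norm() added above is just the Euclidean (L2) norm of all entries stacked into one vector, i.e. the square root of dot(v, v). A minimal usage sketch, not part of the patch; the Vector_ helper and the insert() overload are assumed from the GTSAM API of this period:

    // hypothetical example, assuming a default-constructible VectorValues
    gtsam::VectorValues v;
    v.insert(0, gtsam::Vector_(2, 3.0, 4.0));   // key 0 -> [3, 4]
    double n = v.norm();                         // same as v.vector().norm(), here 5.0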
diff --git a/gtsam_unstable/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
similarity index 99%
rename from gtsam_unstable/linear/iterative-inl.h
rename to gtsam/linear/iterative-inl.h
index f384b7504..3c9cffc7b 100644
--- a/gtsam_unstable/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -18,7 +18,7 @@
 
 #pragma once
 
-#include
+#include
 #include
 #include
diff --git a/gtsam_unstable/linear/iterative.cpp b/gtsam/linear/iterative.cpp
similarity index 98%
rename from gtsam_unstable/linear/iterative.cpp
rename to gtsam/linear/iterative.cpp
index eb1d898dd..6dadb2d72 100644
--- a/gtsam_unstable/linear/iterative.cpp
+++ b/gtsam/linear/iterative.cpp
@@ -16,7 +16,7 @@
 * @date Dec 28, 2009
 */
 
-#include
+#include
 #include
 #include
 #include
diff --git a/gtsam_unstable/linear/iterative.h b/gtsam/linear/iterative.h
similarity index 98%
rename from gtsam_unstable/linear/iterative.h
rename to gtsam/linear/iterative.h
index 29233ae27..4237f2632 100644
--- a/gtsam_unstable/linear/iterative.h
+++ b/gtsam/linear/iterative.h
@@ -149,5 +149,5 @@
 
 } // namespace gtsam
 
-#include
+#include
 
diff --git a/gtsam/nonlinear/GradientDescentOptimizer.cpp b/gtsam/nonlinear/GradientDescentOptimizer.cpp
index 9416b4f6c..9a4b6b37b 100644
--- a/gtsam/nonlinear/GradientDescentOptimizer.cpp
+++ b/gtsam/nonlinear/GradientDescentOptimizer.cpp
@@ -5,15 +5,13 @@
 * @date Jun 11, 2012
 */
 
-#include
-
 #include
 #include
 #include
 #include
 #include
-#include
+#include
 
 using namespace std;
 
@@ -30,7 +28,7 @@ void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, cons
  const FactorGraph<JacobianFactor>::shared_ptr jfg = linear->dynamicCastFactors<FactorGraph<JacobianFactor> >();
 
  // compute the gradient direction
-  gtsam::gradientAtZero(*jfg, g);
+  gradientAtZero(*jfg, g);
 }
 
@@ -112,9 +110,27 @@ void GradientDescentOptimizer::iterate() {
      std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
    }
  }
-
  // Increment the iteration counter
  ++state_.iterations;
 }
 
+double GradientDescentOptimizer2::System::error(const State &state) const {
+  return graph_.error(state);
+}
+
+GradientDescentOptimizer2::System::Gradient GradientDescentOptimizer2::System::gradient(const State &state) const {
+  Gradient result = state.zeroVectors(ordering_);
+  gradientInPlace(graph_, state, ordering_, result);
+  return result;
+}
+GradientDescentOptimizer2::System::State GradientDescentOptimizer2::System::advance(const State &current, const double alpha, const Gradient &g) const {
+  Gradient step = g;
+  step.vector() *= alpha;
+  return current.retract(step, ordering_);
+}
+
+Values GradientDescentOptimizer2::optimize() {
+  return gradientDescent(System(graph_, *ordering_), initialEstimate_, params_);
+}
+
 } /* namespace gtsam */
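With the three System methods defined above, each iteration of GradientDescentOptimizer2::optimize() reduces to the calls sketched below; the gradientDescent driver that issues them is declared in GradientDescentOptimizer.h, next. Here system and current stand in for the optimizer's internal state; this is an illustration, not code from the patch:

    VectorValues g = system.gradient(current);          // gradient of the error at 'current' (via gradientInPlace)
    double alpha   = lineSearch(system, current, g);    // step size from the golden-section line search
    Values next    = system.advance(current, alpha, g); // scale g by alpha, then retract onto the Values manifold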
diff --git a/gtsam/nonlinear/GradientDescentOptimizer.h b/gtsam/nonlinear/GradientDescentOptimizer.h
index 03e1c018b..1a78416de 100644
--- a/gtsam/nonlinear/GradientDescentOptimizer.h
+++ b/gtsam/nonlinear/GradientDescentOptimizer.h
@@ -11,7 +11,7 @@ namespace gtsam {
 
-class GradientDescentOptimizer;
+/* an implementation of the gradient-descent method using the NLO interface */
 
 class GradientDescentParams : public NonlinearOptimizerParams {
 
@@ -70,5 +70,151 @@ protected:
 
 };
 
+#include
+
+/* Yet another implementation of the gradient-descent method using the iterative.h interface */
+class GradientDescentParams2 : public NonlinearOptimizerParams {
+public:
+  GradientDescentParams2(){}
+};
+
+class GradientDescentOptimizer2 {
+
+  class System {
+
+  public:
+
+    typedef Values State;
+    typedef VectorValues Gradient;
+
+  protected:
+
+    NonlinearFactorGraph graph_;
+    Ordering ordering_;
+
+  public:
+
+    System(const NonlinearFactorGraph &graph, const Ordering &ordering): graph_(graph), ordering_(ordering) {}
+    double error(const State &state) const ;
+    Gradient gradient(const State &state) const ;
+    State advance(const State &current, const double alpha, const Gradient &g) const ;
+  };
+
+public:
+
+  typedef GradientDescentParams2 Parameters;
+  typedef boost::shared_ptr<GradientDescentOptimizer2> shared_ptr;
+
+protected:
+
+  NonlinearFactorGraph graph_;
+  Values initialEstimate_;
+  Parameters params_;
+  Ordering::shared_ptr ordering_;
+  VectorValues::shared_ptr gradient_;
+
+public:
+
+  GradientDescentOptimizer2(const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params = Parameters())
+    : graph_(graph), initialEstimate_(initialValues), params_(params),
+      ordering_(initialValues.orderingArbitrary()),
+      gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))) {}
+
+  virtual ~GradientDescentOptimizer2() {}
+
+  virtual Values optimize () ;
+};
+
+
+template <class S, class V, class W>
+double lineSearch(const S &system, const V currentValues, const W &gradient) {
+
+  /* gradient norm, used so that the initial step corresponds to a unit-length move */
+  const double g = gradient.norm();
+
+  // perform the golden section search algorithm to decide the optimal step size
+  // for details, refer to http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5*(1.0+std::sqrt(5.0)), resphi = 2.0 - phi, tau = 1e-5;
+  double minStep = -1.0/g, maxStep = 0,
+         newStep = minStep + (maxStep-minStep) / (phi+1.0) ;
+
+  V newValues = system.advance(currentValues, newStep, gradient);
+  double newError = system.error(newValues);
+
+  while (true) {
+    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false ;
+    const double testStep = flag ?
+        newStep + resphi * (maxStep - newStep) : newStep - resphi * (newStep - minStep);
+
+    if ( (maxStep- minStep) < tau * (std::fabs(testStep) + std::fabs(newStep)) ) {
+      return 0.5*(minStep+maxStep);
+    }
+
+    const V testValues = system.advance(currentValues, testStep, gradient);
+    const double testError = system.error(testValues);
+
+    // update the working range
+    if ( testError >= newError ) {
+      if ( flag ) maxStep = testStep;
+      else minStep = testStep;
+    }
+    else {
+      if ( flag ) {
+        minStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      }
+      else {
+        maxStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      }
+    }
+  }
+
+  return 0.0;
+}
+
+template <class S, class V>
+V gradientDescent(const S &system, const V &initial, const NonlinearOptimizerParams &params) {
+
+  V currentValues = initial, prevValues;
+  double currentError = system.error(currentValues), prevError;
+  Index iteration = 0;
+
+  // check if we're already close enough
+  if(currentError <= params.errorTol) {
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      std::cout << "Exiting, as error = " << currentError << " < " << params.errorTol << std::endl;
+    return currentValues;
+  }
+
+  // Maybe show output
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "Initial error: " << currentError << std::endl;
+
+  // Iterative loop
+  do {
+    // Do next iteration
+    const typename S::Gradient gradient = system.gradient(currentValues);
+    const double alpha = lineSearch(system, currentValues, gradient);
+
+    prevValues = currentValues;
+    prevError = currentError;
+
+    currentValues = system.advance(prevValues, alpha, gradient);
+    currentError = system.error(currentValues);
+
+    // Maybe show output
+    if(params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "currentError: " << currentError << std::endl;
+  } while( ++iteration < params.maxIterations &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+               params.errorTol, prevError, currentError, params.verbosity));
+
+  // Printing if verbose
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR && iteration >= params.maxIterations)
+    std::cout << "Terminating because reached maximum iterations" << std::endl;
+
+  return currentValues;
+}
 }
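The golden-section line search above brackets the step size in [-1/|g|, 0] and repeatedly shrinks the bracket while reusing one function evaluation per iteration. The same recurrence is easier to see on a plain 1-D function; the following self-contained program is an illustration only (goldenSection and quadratic are made-up names, not GTSAM API):

    #include <cmath>
    #include <iostream>

    // Shrink a bracket [a, b] around the minimizer of f, reusing one interior
    // evaluation per iteration (resphi = 2 - phi, as in lineSearch above).
    double goldenSection(double a, double b, double (*f)(double), double tol) {
      const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi;
      double x1 = a + resphi * (b - a), x2 = b - resphi * (b - a);
      double f1 = f(x1), f2 = f(x2);
      while (b - a > tol) {
        if (f1 < f2) {  // minimizer lies in [a, x2]
          b = x2; x2 = x1; f2 = f1;
          x1 = a + resphi * (b - a); f1 = f(x1);
        } else {        // minimizer lies in [x1, b]
          a = x1; x1 = x2; f1 = f2;
          x2 = b - resphi * (b - a); f2 = f(x2);
        }
      }
      return 0.5 * (a + b);
    }

    double quadratic(double x) { return (x - 2.0) * (x - 2.0); }

    int main() {
      std::cout << goldenSection(0.0, 5.0, quadratic, 1e-5) << std::endl;  // prints approximately 2
      return 0;
    }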
diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt
index 2d9f597bd..b03dc3d73 100644
--- a/gtsam_unstable/CMakeLists.txt
+++ b/gtsam_unstable/CMakeLists.txt
@@ -4,7 +4,6 @@
 set (gtsam_unstable_subdirs
     base
     discrete
     dynamics
-    linear
     nonlinear
 )
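Because lineSearch and gradientDescent in the header above are templated on the system type, they are not tied to NonlinearFactorGraph: any type that provides error(), gradient(), advance() and a Gradient typedef can be plugged in. A hypothetical minimal system over a plain gtsam::Vector, illustrative only (QuadraticSystem and its members are assumptions, not part of the patch):

    // minimize 0.5 * ||x - b||^2; State and Gradient are both plain vectors
    struct QuadraticSystem {
      typedef gtsam::Vector State;
      typedef gtsam::Vector Gradient;
      gtsam::Vector b;
      double error(const State& x) const { return 0.5 * (x - b).squaredNorm(); }
      Gradient gradient(const State& x) const { return x - b; }  // ascent direction; lineSearch returns a negative step
      State advance(const State& x, double alpha, const Gradient& g) const { return x + alpha * g; }
    };
    // usage sketch:
    //   QuadraticSystem sys; sys.b = b0;
    //   gtsam::Vector xmin = gradientDescent(sys, x0, NonlinearOptimizerParams());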
diff --git a/tests/testGradientDescentOptimizer.cpp b/tests/testGradientDescentOptimizer.cpp
index de4de4a6f..fac025c77 100644
--- a/tests/testGradientDescentOptimizer.cpp
+++ b/tests/testGradientDescentOptimizer.cpp
@@ -5,19 +5,21 @@
 * @date Jun 11, 2012
 */
 
-#include
 #include
+#include
 #include
-#include
 #include
+#include
+#include
+
 using namespace std;
 using namespace gtsam;
 
-/* ************************************************************************* */
-TEST(optimize, GradientDescentOptimizer) {
+
+boost::tuple<pose2SLAM::Graph, pose2SLAM::Values> generateProblem() {
 
  // 1. Create graph container and add factors to it
  pose2SLAM::Graph graph ;
 
@@ -45,9 +47,21 @@
  Pose2 x3(4.1, 0.1, M_PI_2); initialEstimate.insertPose(3, x3);
  Pose2 x4(4.0, 2.0, M_PI  ); initialEstimate.insertPose(4, x4);
  Pose2 x5(2.1, 2.1,-M_PI_2); initialEstimate.insertPose(5, x5);
+
+  return boost::tie(graph, initialEstimate);
+}
+
+
+/* ************************************************************************* */
+TEST(optimize, GradientDescentOptimizer) {
+
+  pose2SLAM::Graph graph ;
+  pose2SLAM::Values initialEstimate;
+
+  boost::tie(graph, initialEstimate) = generateProblem();
 
//  cout << "initial error = " << graph.error(initialEstimate) << endl ;
 
-  // 4. Single Step Optimization using Levenberg-Marquardt
+  // Single Step Optimization using Levenberg-Marquardt
  GradientDescentParams param;
  param.maxIterations = 500;  /* requires a larger number of iterations to converge */
  param.verbosity = NonlinearOptimizerParams::SILENT;
 
@@ -58,8 +72,32 @@
  /* the optimality of the solution is not comparable to the */
  DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
+
+  CHECK(1);
 }
 
+/* ************************************************************************* */
+TEST(optimize, GradientDescentOptimizer2) {
+
+  pose2SLAM::Graph graph ;
+  pose2SLAM::Values initialEstimate;
+
+  boost::tie(graph, initialEstimate) = generateProblem();
+//  cout << "initial error = " << graph.error(initialEstimate) << endl ;
+
+  // Single Step Optimization using Levenberg-Marquardt
+  GradientDescentParams2 param;
+  param.maxIterations = 500;  /* requires a larger number of iterations to converge */
+  param.verbosity = NonlinearOptimizerParams::ERROR;
+
+
+  GradientDescentOptimizer2 optimizer(graph, initialEstimate, param);
+  Values result = optimizer.optimize();
+//  cout << "solver final error = " << graph.error(result) << endl;
+
+  /* the optimality of the solution is not comparable to the */
+  DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
+}
 
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
diff --git a/tests/testSubgraphPreconditioner.cpp b/tests/testSubgraphPreconditioner.cpp
index 0f18924da..382ac4cc7 100644
--- a/tests/testSubgraphPreconditioner.cpp
+++ b/tests/testSubgraphPreconditioner.cpp
@@ -15,9 +15,9 @@
 * @author Frank Dellaert
 **/
 
-#include
 #include
 #include
+#include
 #include
 #include
 #include