Add a nonlinear gradient-descent optimizer and a unit test.

TODO: 1. test the Wolfe condition or the Armijo rule for the line search, 2. use iterative.h
release/4.3a0
Yong-Dian Jian 2012-06-11 22:10:23 +00:00
parent e53aecf970
commit 83dc580220
3 changed files with 260 additions and 0 deletions
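The TODO above suggests replacing the golden-section line search with the Wolfe condition or the Armijo rule. For reference, a minimal sketch of an Armijo backtracking step, written only against the GTSAM calls the new iterate() already uses below; the free-function form, its name, and the numeric constants are illustrative assumptions, not part of this commit:

// Illustrative only: Armijo (backtracking) line search along the negative gradient.
// Returns true and updates values/error if a step with sufficient decrease is found.
bool armijoStep(const NonlinearFactorGraph &graph, const Ordering &ordering,
                const VectorValues &gradient, Values &values, double &error) {
  const double gradNorm2 = gradient.vector().squaredNorm();
  if (gradNorm2 == 0.0) return false;            // already at a stationary point
  VectorValues step = VectorValues::SameStructure(gradient);
  double alpha = 1.0;                            // initial trial step size (illustrative)
  const double c1 = 1e-4, shrink = 0.5;          // common Armijo constants (illustrative)
  while (alpha > 1e-10) {
    step.vector() = -alpha * gradient.vector();  // move along the descent direction d = -g
    const Values newValues = values.retract(step, ordering);
    const double newError = graph.error(newValues);
    // sufficient-decrease test: f(x + alpha*d) <= f(x) - c1 * alpha * ||g||^2
    if (newError <= error - c1 * alpha * gradNorm2) {
      values = newValues;
      error = newError;
      return true;
    }
    alpha *= shrink;                             // backtrack
  }
  return false;                                  // no acceptable step found
}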


@@ -0,0 +1,120 @@
/**
* @file GradientDescentOptimizer.cpp
 * @brief Gradient-descent nonlinear optimizer with a golden-section line search
* @author ydjian
* @date Jun 11, 2012
*/
#include <cmath>
#include <gtsam/nonlinear/GradientDescentOptimizer.h>
#include <gtsam/nonlinear/Ordering.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/JacobianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam_unstable/linear/iterative.h>
using namespace std;
namespace gtsam {
/**
 * Return the gradient vector of a nonlinear factor graph, evaluated at the given linearization point under the given variable ordering
* Can be moved to NonlinearFactorGraph.h if desired
*/
void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, const Ordering &ordering, VectorValues &g) {
// Linearize graph
GaussianFactorGraph::shared_ptr linear = nfg.linearize(values, ordering);
const FactorGraph<JacobianFactor>::shared_ptr jfg = linear->dynamicCastFactors<FactorGraph<JacobianFactor> >();
// compute the gradient direction: gradientAtZero evaluates the gradient of the linearized error 0.5*||Ax-b||^2 at x = 0, i.e. -A^T b
gtsam::gradientAtZero(*jfg, g);
}
/* ************************************************************************* */
void GradientDescentOptimizer::iterate() {
// Pull out parameters we'll use
const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
// compute the gradient vector
gradientInPlace(graph_, state_.values, *ordering_, *gradient_);
// normalize the gradient to a unit vector; if it vanishes we are at a stationary point and there is nothing to do
const double g = gradient_->vector().norm();
if ( g == 0.0 ) {
++state_.iterations;
return;
}
gradient_->vector() /= g;
// perform a golden-section search to choose the optimal step size
// for details, see http://en.wikipedia.org/wiki/Golden_section_search
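// Since the gradient is normalized, step sizes are negative multiples of a unit descent
// direction, so the initial bracket [minStep, maxStep] = [-1, 0] covers moves of up to one
// unit along the negative gradient. Each pass probes the golden-ratio point of the larger
// sub-interval and keeps the side of the bracket that still contains the lower error, so the
// bracket shrinks geometrically; the loop terminates once its width drops below tau times
// the magnitude of the current step estimates.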
VectorValues step = VectorValues::SameStructure(*gradient_);
const double phi = 0.5*(1.0+std::sqrt(5.0)), resphi = 2.0 - phi, tau = 1e-5;
double minStep = -1.0, maxStep = 0,
newStep = minStep + (maxStep-minStep) / (phi+1.0) ;
step.vector() = newStep * gradient_->vector();
Values newValues = state_.values.retract(step, *ordering_);
double newError = graph_.error(newValues);
if ( nloVerbosity ) {
std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
}
while (true) {
const bool flag = (maxStep - newStep) > (newStep - minStep);
const double testStep = flag ?
newStep + resphi * (maxStep - newStep) : newStep - resphi * (newStep - minStep);
if ( (maxStep- minStep) < tau * (std::fabs(testStep) + std::fabs(newStep)) ) {
newStep = 0.5*(minStep+maxStep);
step.vector() = newStep * gradient_->vector();
newValues = state_.values.retract(step, *ordering_);
newError = graph_.error(newValues);
if ( newError < state_.error ) {
// newValues and newError were just computed for this step size; reuse them
state_.values = newValues;
state_.error = newError;
}
break;
}
step.vector() = testStep * gradient_->vector();
const Values testValues = state_.values.retract(step, *ordering_);
const double testError = graph_.error(testValues);
// update the working range
if ( testError < newError ) {
if ( flag ) {
minStep = newStep;
newStep = testStep;
newError = testError;
}
else {
maxStep = newStep;
newStep = testStep;
newError = testError;
}
}
else {
if ( flag ) {
maxStep = testStep;
}
else {
minStep = testStep;
}
}
if ( nloVerbosity ) {
std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
}
}
// Increment the iteration counter
++state_.iterations;
}
} /* namespace gtsam */


@@ -0,0 +1,74 @@
/**
 * @file GradientDescentOptimizer.h
 * @brief Declaration of a simple gradient-descent nonlinear optimizer
* @author ydjian
* @date Jun 11, 2012
*/
#pragma once
#include <gtsam/nonlinear/NonlinearOptimizer.h>
namespace gtsam {
class GradientDescentOptimizer;
class GradientDescentParams : public NonlinearOptimizerParams {
public:
typedef NonlinearOptimizerParams Base;
GradientDescentParams():Base() {}
virtual void print(const std::string& str = "") const {
Base::print(str);
}
virtual ~GradientDescentParams() {}
};
class GradientDescentState : public NonlinearOptimizerState {
public:
typedef NonlinearOptimizerState Base;
GradientDescentState(const NonlinearFactorGraph& graph, const Values& values)
: Base(graph, values) {}
};
class GradientDescentOptimizer : public NonlinearOptimizer {
public:
typedef boost::shared_ptr<GradientDescentOptimizer> shared_ptr;
typedef NonlinearOptimizer Base;
typedef GradientDescentState States;
typedef GradientDescentParams Parameters;
protected:
Parameters params_;
States state_;
Ordering::shared_ptr ordering_;     // fixed variable ordering used for linearization and retraction
VectorValues::shared_ptr gradient_; // gradient buffer, reused across iterations
public:
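// The constructor fixes an arbitrary variable ordering from the initial values and
// pre-allocates the gradient buffer as matching zero vectors, so iterate() can fill it
// in place on every iteration.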
GradientDescentOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params = Parameters())
: Base(graph), params_(params), state_(graph, initialValues),
ordering_(initialValues.orderingArbitrary()),
gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))) {}
virtual ~GradientDescentOptimizer() {}
virtual void iterate();
protected:
virtual const NonlinearOptimizerState& _state() const { return state_; }
virtual const NonlinearOptimizerParams& _params() const { return params_; }
};
} /* namespace gtsam */


@@ -0,0 +1,66 @@
/**
* @file testGradientDescentOptimizer.cpp
 * @brief Unit test for GradientDescentOptimizer on a small pose2SLAM problem
* @author ydjian
* @date Jun 11, 2012
*/
#include <gtsam/nonlinear/GradientDescentOptimizer.h>
#include <gtsam/slam/pose2SLAM.h>
#include <CppUnitLite/TestHarness.h>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
using namespace std;
using namespace gtsam;
/* ************************************************************************* */
TEST(optimize, GradientDescentOptimizer) {
// 1. Create graph container and add factors to it
pose2SLAM::Graph graph ;
// 2a. Add Gaussian prior
Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
SharedDiagonal priorNoise(Vector_(3, 0.3, 0.3, 0.1));
graph.addPrior(1, priorMean, priorNoise);
// 2b. Add odometry factors
SharedDiagonal odometryNoise(Vector_(3, 0.2, 0.2, 0.1));
graph.addOdometry(1, 2, Pose2(2.0, 0.0, 0.0 ), odometryNoise);
graph.addOdometry(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
graph.addOdometry(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
graph.addOdometry(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
// 2c. Add pose constraint
SharedDiagonal constraintUncertainty(Vector_(3, 0.2, 0.2, 0.1));
graph.addConstraint(5, 2, Pose2(2.0, 0.0, M_PI_2), constraintUncertainty);
// 3. Create the data structure to hold the initial estimate of the solution
pose2SLAM::Values initialEstimate;
Pose2 x1(0.5, 0.0, 0.2 ); initialEstimate.insertPose(1, x1);
Pose2 x2(2.3, 0.1,-0.2 ); initialEstimate.insertPose(2, x2);
Pose2 x3(4.1, 0.1, M_PI_2); initialEstimate.insertPose(3, x3);
Pose2 x4(4.0, 2.0, M_PI ); initialEstimate.insertPose(4, x4);
Pose2 x5(2.1, 2.1,-M_PI_2); initialEstimate.insertPose(5, x5);
// cout << "initial error = " << graph.error(initialEstimate) << endl ;
// 4. Optimize with gradient descent
GradientDescentParams param;
param.maxIterations = 500; /* requires a larger number of iterations to converge */
param.verbosity = NonlinearOptimizerParams::SILENT;
GradientDescentOptimizer optimizer(graph, initialEstimate, param);
Values result = optimizer.optimize();
// cout << "solver final error = " << graph.error(result) << endl;
/* the solution is not as accurate as one from a second-order method, hence the loose tolerance */
DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */