Cleaned up (obsolete?) CG-based solver by Yong-Dian

release/4.3a0
dellaert 2015-02-18 08:20:09 +01:00
parent ae1e454ffd
commit fad11c0f4b
3 changed files with 153 additions and 107 deletions

NonlinearConjugateGradientOptimizer.cpp

@@ -1,7 +1,18 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file GradientDescentOptimizer.cpp
* @brief
* @author ydjian
* @file NonlinearConjugateGradientOptimizer.cpp
* @brief Simple non-linear optimizer that solves using *non-preconditioned* CG
* @author Yong-Dian Jian
* @date Jun 11, 2012
*/
@@ -16,22 +27,31 @@ using namespace std;
namespace gtsam {
/* Return the gradient vector of a nonlinear factor given a linearization point and a variable ordering
* Can be moved to NonlinearFactorGraph.h if desired */
VectorValues gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values) {
/**
* @brief Return the gradient vector of a nonlinear factor graph
* @param nfg the graph
* @param values a linearization point
* Can be moved to NonlinearFactorGraph.h if desired
*/
static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg,
const Values &values) {
// Linearize graph
GaussianFactorGraph::shared_ptr linear = nfg.linearize(values);
return linear->gradientAtZero();
}
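For context (a sketch of the math, assuming GTSAM's usual convention that the linearized graph encodes the quadratic error): the gradientAtZero() call above evaluates to

    \nabla_{\delta x}\,\tfrac{1}{2}\lVert A\,\delta x - b\rVert^{2}\big|_{\delta x = 0} = A^{\top}(A\,\delta x - b)\big|_{\delta x = 0} = -A^{\top} b,

i.e. the gradient of the nonlinear error at the linearization point, returned as a VectorValues in the tangent space.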
double NonlinearConjugateGradientOptimizer::System::error(const State &state) const {
double NonlinearConjugateGradientOptimizer::System::error(
const State &state) const {
return graph_.error(state);
}
NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(const State &state) const {
NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(
const State &state) const {
return gradientInPlace(graph_, state);
}
NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(const State &current, const double alpha, const Gradient &g) const {
NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(
const State &current, const double alpha, const Gradient &g) const {
Gradient step = g;
step *= alpha;
return current.retract(step);
@@ -39,13 +59,17 @@ NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOpt
void NonlinearConjugateGradientOptimizer::iterate() {
int dummy;
boost::tie(state_.values, dummy) = nonlinearConjugateGradient<System, Values>(System(graph_), state_.values, params_, true /* single iterations */);
boost::tie(state_.values, dummy) = nonlinearConjugateGradient<System, Values>(
System(graph_), state_.values, params_, true /* single iterations */);
++state_.iterations;
state_.error = graph_.error(state_.values);
}
const Values& NonlinearConjugateGradientOptimizer::optimize() {
boost::tie(state_.values, state_.iterations) = nonlinearConjugateGradient<System, Values>(System(graph_), state_.values, params_, false /* up to convergent */);
// Optimize until convergence
System system(graph_);
boost::tie(state_.values, state_.iterations) = //
nonlinearConjugateGradient(system, state_.values, params_, false);
state_.error = graph_.error(state_.values);
return state_.values;
}
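For context, a minimal usage sketch of this optimizer; the helper name optimizeWithCG is hypothetical, and the graph and initial estimate are assumed to be built by the caller (e.g. as in the unit test at the bottom of this commit):

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

using namespace gtsam;

// Hypothetical helper: run non-preconditioned nonlinear CG to convergence.
Values optimizeWithCG(const NonlinearFactorGraph& graph, const Values& initial) {
  NonlinearOptimizerParams params;
  params.maxIterations = 500;  // CG typically needs far more iterations than LM
  NonlinearConjugateGradientOptimizer optimizer(graph, initial, params);
  return optimizer.optimize();  // iterates until checkConvergence() triggers
}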

NonlinearConjugateGradientOptimizer.h

@@ -1,8 +1,19 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file GradientDescentOptimizer.cpp
* @brief
* @file NonlinearConjugateGradientOptimizer.h
* @brief Simple non-linear optimizer that solves using *non-preconditioned* CG
* @author Yong-Dian Jian
* @date Jun 11, 2012
* @date June 11, 2012
*/
#pragma once
@@ -13,15 +24,18 @@
namespace gtsam {
/** An implementation of the nonlinear cg method using the template below */
/** An implementation of the nonlinear CG method using the template below */
class GTSAM_EXPORT NonlinearConjugateGradientState: public NonlinearOptimizerState {
public:
typedef NonlinearOptimizerState Base;
NonlinearConjugateGradientState(const NonlinearFactorGraph& graph, const Values& values)
: Base(graph, values) {}
NonlinearConjugateGradientState(const NonlinearFactorGraph& graph,
const Values& values) :
Base(graph, values) {
}
};
class GTSAM_EXPORT NonlinearConjugateGradientOptimizer: public NonlinearOptimizer {
/* a class for the nonlinearConjugateGradient template */
class System {
public:
@@ -33,33 +47,45 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz
const NonlinearFactorGraph &graph_;
public:
System(const NonlinearFactorGraph &graph): graph_(graph) {}
System(const NonlinearFactorGraph &graph) :
graph_(graph) {
}
double error(const State &state) const;
Gradient gradient(const State &state) const;
State advance(const State &current, const double alpha, const Gradient &g) const ;
State advance(const State &current, const double alpha,
const Gradient &g) const;
};
public:
typedef NonlinearOptimizer Base;
typedef NonlinearConjugateGradientState States;
typedef NonlinearOptimizerParams Parameters;
typedef boost::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;
protected:
States state_;
NonlinearConjugateGradientState state_;
Parameters params_;
public:
NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
const Parameters& params = Parameters())
: Base(graph), state_(graph, initialValues), params_(params) {}
/// Constructor
NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph,
const Values& initialValues, const Parameters& params = Parameters()) :
Base(graph), state_(graph, initialValues), params_(params) {
}
/// Destructor
virtual ~NonlinearConjugateGradientOptimizer() {
}
virtual ~NonlinearConjugateGradientOptimizer() {}
virtual void iterate();
virtual const Values& optimize();
virtual const NonlinearOptimizerState& _state() const { return state_; }
virtual const NonlinearOptimizerParams& _params() const { return params_; }
virtual const NonlinearOptimizerState& _state() const {
return state_;
}
virtual const NonlinearOptimizerParams& _params() const {
return params_;
}
};
/** Implement the golden-section line search algorithm */
@@ -71,19 +97,22 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
// Perform the golden-section search algorithm to decide the optimal step size;
// for details refer to http://en.wikipedia.org/wiki/Golden_section_search
const double phi = 0.5*(1.0+std::sqrt(5.0)), resphi = 2.0 - phi, tau = 1e-5;
double minStep = -1.0/g, maxStep = 0,
newStep = minStep + (maxStep-minStep) / (phi+1.0) ;
const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, tau =
1e-5;
double minStep = -1.0 / g, maxStep = 0, newStep = minStep
+ (maxStep - minStep) / (phi + 1.0);
V newValues = system.advance(currentValues, newStep, gradient);
double newError = system.error(newValues);
while (true) {
const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
const double testStep = flag ?
newStep + resphi * (maxStep - newStep) : newStep - resphi * (newStep - minStep);
const double testStep =
flag ? newStep + resphi * (maxStep - newStep) :
newStep - resphi * (newStep - minStep);
if ( (maxStep- minStep) < tau * (std::fabs(testStep) + std::fabs(newStep)) ) {
if ((maxStep - minStep)
< tau * (std::fabs(testStep) + std::fabs(newStep))) {
return 0.5 * (minStep + maxStep);
}
@@ -92,16 +121,16 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
// update the working range
if (testError >= newError) {
if ( flag ) maxStep = testStep;
else minStep = testStep;
}
else {
if (flag)
maxStep = testStep;
else
minStep = testStep;
} else {
if (flag) {
minStep = newStep;
newStep = testStep;
newError = testError;
}
else {
} else {
maxStep = newStep;
newStep = testStep;
newError = testError;
@@ -112,7 +141,7 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
}
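To make the bracketing logic above easier to follow, here is a self-contained sketch of plain golden-section minimization of a unimodal 1-D function, using the same phi/resphi constants; goldenSectionMinimize is a hypothetical illustration, not GTSAM code:

#include <cmath>
#include <functional>

// Minimize a unimodal f on [a, b] by golden-section search, e.g.
// goldenSectionMinimize([](double s) { return (s - 2) * (s - 2); }, 0, 5) ~= 2.
double goldenSectionMinimize(const std::function<double(double)>& f,
                             double a, double b, double tol = 1e-5) {
  const double phi = 0.5 * (1.0 + std::sqrt(5.0));
  const double resphi = 2.0 - phi;  // ~0.382, the golden-section ratio
  double x = a + resphi * (b - a);  // interior probe point
  double fx = f(x);
  while (b - a > tol * (std::fabs(a) + std::fabs(b))) {
    // Place the new probe in the larger of the two sub-intervals
    const bool rightLarger = (b - x > x - a);
    const double t = rightLarger ? x + resphi * (b - x)
                                 : x - resphi * (x - a);
    const double ft = f(t);
    if (ft < fx) {  // minimum is bracketed around t: keep t as interior point
      if (rightLarger) a = x; else b = x;
      x = t;
      fx = ft;
    } else {        // minimum stays around x: discard the far end
      if (rightLarger) b = t; else a = t;
    }
  }
  return 0.5 * (a + b);
}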
/**
* Implement the nonlinear conjugate gradient method using the Polak-Ribieve formula suggested in
* Implement the nonlinear conjugate gradient method using the Polak-Ribiere formula suggested in
* http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
*
* The S (system) class requires three member functions: error(state), gradient(state) and
@@ -121,7 +150,9 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
* The last parameter is a switch between gradient-descent and conjugate gradient
*/
template<class S, class V>
boost::tuple<V, int> nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool singleIteration, const bool gradientDescent = false) {
boost::tuple<V, int> nonlinearConjugateGradient(const S &system,
const V &initial, const NonlinearOptimizerParams &params,
const bool singleIteration, const bool gradientDescent = false) {
// GTSAM_CONCEPT_MANIFOLD_TYPE(V);
@@ -131,55 +162,65 @@ boost::tuple<V, int> nonlinearConjugateGradient(const S &system, const V &initia
double currentError = system.error(initial);
if (currentError <= params.errorTol) {
if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
std::cout << "Exiting, as error = " << currentError << " < " << params.errorTol << std::endl;
std::cout << "Exiting, as error = " << currentError << " < "
<< params.errorTol << std::endl;
}
return boost::tie(initial, iteration);
}
V currentValues = initial;
typename S::Gradient currentGradient = system.gradient(currentValues), prevGradient,
direction = currentGradient;
typename S::Gradient currentGradient = system.gradient(currentValues),
prevGradient, direction = currentGradient;
/* do one step of gradient descent */
V prevValues = currentValues; double prevError = currentError;
V prevValues = currentValues;
double prevError = currentError;
double alpha = lineSearch(system, currentValues, direction);
currentValues = system.advance(prevValues, alpha, direction);
currentError = system.error(currentValues);
// Maybe show output
if (params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "Initial error: " << currentError << std::endl;
if (params.verbosity >= NonlinearOptimizerParams::ERROR)
std::cout << "Initial error: " << currentError << std::endl;
// Iterative loop
do {
if (gradientDescent == true) {
direction = system.gradient(currentValues);
}
else {
} else {
prevGradient = currentGradient;
currentGradient = system.gradient(currentValues);
const double beta = std::max(0.0, currentGradient.dot(currentGradient-prevGradient)/currentGradient.dot(currentGradient));
// Polak-Ribiere: beta = g_n' * (g_n - g_{n-1}) / (g_n' * g_n)
const double beta = std::max(0.0,
currentGradient.dot(currentGradient - prevGradient)
/ currentGradient.dot(currentGradient));
direction = currentGradient + (beta * direction);
}
alpha = lineSearch(system, currentValues, direction);
prevValues = currentValues; prevError = currentError;
prevValues = currentValues;
prevError = currentError;
currentValues = system.advance(prevValues, alpha, direction);
currentError = system.error(currentValues);
// Maybe show output
if(params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "currentError: " << currentError << std::endl;
} while( ++iteration < params.maxIterations &&
!singleIteration &&
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol, prevError, currentError, params.verbosity));
if (params.verbosity >= NonlinearOptimizerParams::ERROR)
std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl;
} while (++iteration < params.maxIterations && !singleIteration
&& !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
params.errorTol, prevError, currentError, params.verbosity));
// Printing if verbose
if (params.verbosity >= NonlinearOptimizerParams::ERROR && iteration >= params.maxIterations)
std::cout << "nonlinearConjugateGradient: Terminating because reached maximum iterations" << std::endl;
if (params.verbosity >= NonlinearOptimizerParams::ERROR
&& iteration >= params.maxIterations)
std::cout
<< "nonlinearConjugateGradient: Terminating because reached maximum iterations"
<< std::endl;
return boost::tie(currentValues, iteration);
}
}
} // namespace gtsam
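For reference, the direction update implemented in the loop above is, in math form (a sketch; note that the code's denominator is g_n'*g_n, whereas the textbook Polak-Ribiere formula divides by g_{n-1}'*g_{n-1}):

    \beta_n = \max\!\left(0,\; \frac{g_n^{\top}(g_n - g_{n-1})}{g_n^{\top} g_n}\right),
    \qquad d_n = g_n + \beta_n\, d_{n-1},

after which the step size alpha_n along d_n is chosen by the golden-section lineSearch and the state is advanced via retract.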

testGradientDescentOptimizer.cpp

@@ -1,7 +1,14 @@
/**
* @file NonlinearConjugateGradientOptimizer.cpp
* @brief Test simple CG optimizer
* @author Yong-Dian Jian
* @date June 11, 2012
*/
/**
* @file testGradientDescentOptimizer.cpp
* @brief
* @author ydjian
* @brief Small test of NonlinearConjugateGradientOptimizer
* @author Yong-Dian Jian
* @date Jun 11, 2012
*/
@@ -22,7 +29,7 @@
using namespace std;
using namespace gtsam;
// Generate a small PoseSLAM problem
boost::tuple<NonlinearFactorGraph, Values> generateProblem() {
// 1. Create graph container and add factors to it
@@ -56,49 +63,23 @@ boost::tuple<NonlinearFactorGraph, Values> generateProblem() {
}
/* ************************************************************************* */
TEST(optimize, GradientDescentOptimizer) {
TEST(NonlinearConjugateGradientOptimizer, Optimize) {
NonlinearFactorGraph graph;
Values initialEstimate;
boost::tie(graph, initialEstimate) = generateProblem();
// cout << "initial error = " << graph.error(initialEstimate) << endl ;
cout << "initial error = " << graph.error(initialEstimate) << endl;
// Optimize using nonlinear conjugate gradient
NonlinearOptimizerParams param;
param.maxIterations = 500; /* requires a larger number of iterations to converge */
param.verbosity = NonlinearOptimizerParams::SILENT;
param.verbosity = NonlinearOptimizerParams::ERROR;
NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
Values result = optimizer.optimize();
// cout << "gd1 solver final error = " << graph.error(result) << endl;
cout << "cg final error = " << graph.error(result) << endl;
/* the optimality of the solution is not comparable to the */
DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
CHECK(1);
}
/* ************************************************************************* */
TEST(optimize, ConjugateGradientOptimizer) {
NonlinearFactorGraph graph;
Values initialEstimate;
boost::tie(graph, initialEstimate) = generateProblem();
// cout << "initial error = " << graph.error(initialEstimate) << endl ;
// Single Step Optimization using Levenberg-Marquardt
NonlinearOptimizerParams param;
param.maxIterations = 500; /* requires a larger number of iterations to converge */
param.verbosity = NonlinearOptimizerParams::SILENT;
NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
Values result = optimizer.optimize();
// cout << "cg final error = " << graph.error(result) << endl;
/* the optimality of the solution is not comparable to the */
DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
}
/* ************************************************************************* */