add Cal3DS2.calibrate() with fixed point iteration
reorg nonlinear conjugate gradient solvers; wrapper for the linear solvers

branch: release/4.3a0
parent a66a42189c
commit 168ddf5457
@@ -78,6 +78,36 @@ Point2 Cal3DS2::uncalibrate(const Point2& p,
  return Point2(fx_* x2 + s_ * y2 + u0_, fy_ * y2 + v0_) ;
}

/* ************************************************************************* */
Point2 Cal3DS2::calibrate(const Point2& pi, const double tol) const {
  // Use the following fixed-point iteration to invert the radial distortion:
  // pn_{t+1} = (inv(K)*pi - dp(pn_{t})) / g(pn_{t})

  const Point2 invKPi ((1 / fx_) * (pi.x() - u0_ - (s_ / fy_) * (pi.y() - v0_)),
                       (1 / fy_) * (pi.y() - v0_));

  // initialize by ignoring the distortion entirely; this may be problematic for pixels near the boundary
  Point2 pn = invKPi;

  // iterate until uncalibrate(pn) is close to the actual pixel coordinate
  const int maxIterations = 10;
  int iteration;
  for ( iteration = 0 ; iteration < maxIterations ; ++iteration ) {
    if ( uncalibrate(pn).dist(pi) <= tol ) break;
    const double x = pn.x(), y = pn.y(), xy = x*y, xx = x*x, yy = y*y ;
    const double r = xx + yy ;
    const double g = (1+k1_*r+k2_*r*r) ;
    const double dx = 2*k3_*xy + k4_*(r+2*xx) ;
    const double dy = 2*k4_*xy + k3_*(r+2*yy) ;
    pn = (invKPi - Point2(dx,dy))/g ;
  }

  if ( iteration >= maxIterations )
    throw std::runtime_error("Cal3DS2::calibrate fails to converge; need a better initialization");

  return pn;
}

/* ************************************************************************* */
Matrix Cal3DS2::D2d_intrinsic(const Point2& p) const {
  //const double fx = fx_, fy = fy_, s = s_ ;
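For readability, the fixed-point update implemented in calibrate() above can be written out as follows (a restatement of the code, not part of the diff; K is the linear calibration, k1..k4 the distortion coefficients, and p_i = (u, v) the input pixel):

\[
p_n^{(t+1)} = \frac{K^{-1} p_i - d\big(p_n^{(t)}\big)}{g\big(p_n^{(t)}\big)}, \qquad
g(p) = 1 + k_1 r + k_2 r^2, \quad r = x^2 + y^2,
\]
\[
d(p) = \begin{pmatrix} 2 k_3 x y + k_4 (r + 2x^2) \\ 2 k_4 x y + k_3 (r + 2y^2) \end{pmatrix}, \qquad
K^{-1} p_i = \begin{pmatrix} \tfrac{1}{f_x}\big(u - u_0 - \tfrac{s}{f_y}(v - v_0)\big) \\ \tfrac{1}{f_y}(v - v_0) \end{pmatrix},
\]

with the division by \(g\) applied componentwise; the loop stops once uncalibrate(pn) is within tol of the input pixel and throws after 10 iterations otherwise.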
@@ -96,6 +96,9 @@ public:
      boost::optional<Matrix&> H1 = boost::none,
      boost::optional<Matrix&> H2 = boost::none) const ;

  /// Convert a pixel coordinate to an ideal (intrinsic) coordinate
  Point2 calibrate(const Point2& p, const double tol=1e-5) const;

  /// Derivative of uncalibrate w.r.t. intrinsic coordinates
  Matrix D2d_intrinsic(const Point2& p) const ;
@@ -136,8 +136,8 @@ namespace gtsam {
  /// convert image coordinates uv to intrinsic coordinates xy
  Point2 calibrate(const Point2& p) const {
    const double u = p.x(), v = p.y();
    return Point2((1 / fx_) * (u - u0_ - (s_ / fy_) * (v - v0_)), (1 / fy_)
        * (v - v0_));
    return Point2((1 / fx_) * (u - u0_ - (s_ / fy_) * (v - v0_)),
                  (1 / fy_) * (v - v0_));
  }

  /// @}
@@ -29,7 +29,7 @@ static Cal3DS2 K(500, 100, 0.1, 320, 240, 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3);
static Point2 p(2,3);

/* ************************************************************************* */
TEST( Cal3DS2, calibrate)
TEST( Cal3DS2, uncalibrate)
{
  Vector k = K.k() ;
  double r = p.x()*p.x() + p.y()*p.y() ;
@@ -43,6 +43,14 @@ TEST( Cal3DS2, calibrate)
  CHECK(assert_equal(q,p_i));
}

TEST( Cal3DS2, calibrate )
{
  Point2 pn(0.5, 0.5);
  Point2 pi = K.uncalibrate(pn);
  Point2 pn_hat = K.calibrate(pi);
  CHECK( pn.equals(pn_hat, 1e-5));
}

Point2 uncalibrate_(const Cal3DS2& k, const Point2& pt) { return k.uncalibrate(pt); }

/* ************************************************************************* */
@@ -16,8 +16,6 @@
 * @date Feb 26, 2012
 */

#include <gtsam/inference/EliminationTree.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>

using namespace std;
@@ -32,22 +30,8 @@ void GaussNewtonOptimizer::iterate() {
  // Linearize graph
  GaussianFactorGraph::shared_ptr linear = graph_.linearize(current.values, *params_.ordering);

  // Optimize
  VectorValues delta;
  {
    if ( params_.isMultifrontal() ) {
      delta = GaussianJunctionTree(*linear).optimize(params_.getEliminationFunction());
    }
    else if ( params_.isSequential() ) {
      delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(*linear)->eliminate(params_.getEliminationFunction()));
    }
    else if ( params_.isCG() ) {
      throw runtime_error("todo: ");
    }
    else {
      throw runtime_error("Optimization parameter is invalid: GaussNewtonParams::elimination");
    }
  }
  // Solve Factor Graph
  const VectorValues delta = solveGaussianFactorGraph(*linear, params_);

  // Maybe show output
  if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta.print("delta");
@@ -1,138 +0,0 @@
/**
 * @file GradientDescentOptimizer.cpp
 * @brief
 * @author ydjian
 * @date Jun 11, 2012
 */

#include <gtsam/nonlinear/GradientDescentOptimizer.h>
#include <gtsam/nonlinear/Ordering.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/JacobianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

#include <cmath>

using namespace std;

namespace gtsam {

/**
 * Return the gradient vector of a nonlinear factor graph given a linearization point and a variable ordering
 * Can be moved to NonlinearFactorGraph.h if desired
 */
void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, const Ordering &ordering, VectorValues &g) {

  // Linearize graph
  GaussianFactorGraph::shared_ptr linear = nfg.linearize(values, ordering);
  FactorGraph<JacobianFactor> jfg; jfg.reserve(linear->size());
  BOOST_FOREACH(const GaussianFactorGraph::sharedFactor& factor, *linear) {
    if(boost::shared_ptr<JacobianFactor> jf = boost::dynamic_pointer_cast<JacobianFactor>(factor))
      jfg.push_back((jf));
    else
      jfg.push_back(boost::make_shared<JacobianFactor>(*factor));
  }

  // compute the gradient direction
  gradientAtZero(jfg, g);
}


/* ************************************************************************* */
void GradientDescentOptimizer::iterate() {

  // Pull out parameters we'll use
  const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;

  // compute the gradient vector
  gradientInPlace(graph_, state_.values, *ordering_, *gradient_);

  /* normalize it such that it becomes a unit vector */
  const double g = gradient_->vector().norm();
  gradient_->vector() /= g;

  // perform the golden-section search algorithm to decide the optimal step size
  // for details, refer to http://en.wikipedia.org/wiki/Golden_section_search
  VectorValues step = VectorValues::SameStructure(*gradient_);
  const double phi = 0.5*(1.0+std::sqrt(5.0)), resphi = 2.0 - phi, tau = 1e-5;
  double minStep = -1.0, maxStep = 0,
         newStep = minStep + (maxStep-minStep) / (phi+1.0) ;

  step.vector() = newStep * gradient_->vector();
  Values newValues = state_.values.retract(step, *ordering_);
  double newError = graph_.error(newValues);

  if ( nloVerbosity ) {
    std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
  }

  while (true) {
    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false ;
    const double testStep = flag ?
        newStep + resphi * (maxStep - newStep) : newStep - resphi * (newStep - minStep);

    if ( (maxStep - minStep) < tau * (std::fabs(testStep) + std::fabs(newStep)) ) {
      newStep = 0.5*(minStep+maxStep);
      step.vector() = newStep * gradient_->vector();
      newValues = state_.values.retract(step, *ordering_);
      newError = graph_.error(newValues);

      if ( newError < state_.error ) {
        state_.values = state_.values.retract(step, *ordering_);
        state_.error = graph_.error(state_.values);
      }

      break;
    }

    step.vector() = testStep * gradient_->vector();
    const Values testValues = state_.values.retract(step, *ordering_);
    const double testError = graph_.error(testValues);

    // update the working range
    if ( testError >= newError ) {
      if ( flag ) maxStep = testStep;
      else minStep = testStep;
    }
    else {
      if ( flag ) {
        minStep = newStep;
        newStep = testStep;
        newError = testError;
      }
      else {
        maxStep = newStep;
        newStep = testStep;
        newError = testError;
      }
    }

    if ( nloVerbosity ) {
      std::cout << "minStep = " << minStep << ", maxStep = " << maxStep << ", newStep = " << newStep << ", newError = " << newError << std::endl;
    }
  }
  // Increment the iteration counter
  ++state_.iterations;
}

double ConjugateGradientOptimizer::System::error(const State &state) const {
  return graph_.error(state);
}

ConjugateGradientOptimizer::System::Gradient ConjugateGradientOptimizer::System::gradient(const State &state) const {
  Gradient result = state.zeroVectors(ordering_);
  gradientInPlace(graph_, state, ordering_, result);
  return result;
}
ConjugateGradientOptimizer::System::State ConjugateGradientOptimizer::System::advance(const State &current, const double alpha, const Gradient &g) const {
  Gradient step = g;
  step.vector() *= alpha;
  return current.retract(step, ordering_);
}

Values ConjugateGradientOptimizer::optimize() {
  return conjugateGradient<System, Values>(System(graph_, *ordering_), initialEstimate_, params_, !cg_);
}

} /* namespace gtsam */
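For reference, the golden-section bracketing used by the line search above (and kept in the lineSearch template further below) shrinks a step-size bracket [a, b] around an interior point s; the constants are taken directly from the deleted code, and a, b, s, t are just shorthand for minStep, maxStep, newStep, testStep:

\[
\varphi = \frac{1 + \sqrt{5}}{2}, \qquad \rho = 2 - \varphi, \qquad
t = \begin{cases}
s + \rho\,(b - s) & \text{if } b - s > s - a, \\
s - \rho\,(s - a) & \text{otherwise},
\end{cases}
\]

and the search stops when \(b - a < \tau\,(|t| + |s|)\) with \(\tau = 10^{-5}\), taking the midpoint of the final bracket as the step.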
@@ -19,12 +19,9 @@
#include <cmath>

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>

#include <gtsam/base/cholesky.h> // For NegativeMatrixException
#include <gtsam/inference/EliminationTree.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/linear/SimpleSPCGSolver.h>
#include <gtsam/linear/SubgraphSolver.h>
#include <boost/algorithm/string.hpp>
#include <string>
@@ -106,34 +103,8 @@ void LevenbergMarquardtOptimizer::iterate() {

  // Try solving
  try {

    // Optimize
    VectorValues delta;
    if ( params_.isMultifrontal() ) {
      delta = GaussianJunctionTree(dampedSystem).optimize(params_.getEliminationFunction());
    }
    else if ( params_.isSequential() ) {
      delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(dampedSystem)->eliminate(params_.getEliminationFunction()));
    }
    else if ( params_.isCG() ) {

      if ( !params_.iterativeParams ) throw runtime_error("LMSolver: cg parameter has to be assigned ...");

      if ( boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams)) {
        SimpleSPCGSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params_.iterativeParams));
        delta = solver.optimize();
      }
      else if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams) ) {
        SubgraphSolver solver (dampedSystem, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params_.iterativeParams));
        delta = solver.optimize();
      }
      else {
        throw runtime_error("LMSolver: special cg parameter type is not handled in LM solver ...");
      }
    }
    else {
      throw runtime_error("Optimization parameter is invalid: LevenbergMarquardtParams::elimination");
    }
    // Solve Damped Gaussian Factor Graph
    const VectorValues delta = solveGaussianFactorGraph(dampedSystem, params_);

    if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl;
    if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta");
@@ -0,0 +1,64 @@
/**
 * @file GradientDescentOptimizer.cpp
 * @brief
 * @author ydjian
 * @date Jun 11, 2012
 */

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include <gtsam/nonlinear/Ordering.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/JacobianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

#include <cmath>

using namespace std;

namespace gtsam {

/* Return the gradient vector of a nonlinear factor graph given a linearization point and a variable ordering
 * Can be moved to NonlinearFactorGraph.h if desired */
void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, const Ordering &ordering, VectorValues &g) {
  // Linearize graph
  GaussianFactorGraph::shared_ptr linear = nfg.linearize(values, ordering);
  FactorGraph<JacobianFactor> jfg; jfg.reserve(linear->size());
  BOOST_FOREACH(const GaussianFactorGraph::sharedFactor& factor, *linear) {
    if(boost::shared_ptr<JacobianFactor> jf = boost::dynamic_pointer_cast<JacobianFactor>(factor))
      jfg.push_back((jf));
    else
      jfg.push_back(boost::make_shared<JacobianFactor>(*factor));
  }
  // compute the gradient direction
  gradientAtZero(jfg, g);
}

double NonlinearConjugateGradientOptimizer::System::error(const State &state) const {
  return graph_.error(state);
}

NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(const State &state) const {
  Gradient result = state.zeroVectors(ordering_);
  gradientInPlace(graph_, state, ordering_, result);
  return result;
}
NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(const State &current, const double alpha, const Gradient &g) const {
  Gradient step = g;
  step.vector() *= alpha;
  return current.retract(step, ordering_);
}

void NonlinearConjugateGradientOptimizer::iterate() {
  size_t dummy ;
  boost::tie(state_.values, dummy) = nonlinearConjugateGradient<System, Values>(System(graph_, *ordering_), state_.values, params_, true /* single iteration */);
  ++state_.iterations;
  state_.error = graph_.error(state_.values);
}

const Values& NonlinearConjugateGradientOptimizer::optimize() {
  boost::tie(state_.values, state_.iterations) = nonlinearConjugateGradient<System, Values>(System(graph_, *ordering_), state_.values, params_, false /* up to convergence */);
  state_.error = graph_.error(state_.values);
  return state_.values;
}

} /* namespace gtsam */
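A minimal usage sketch of the renamed optimizer (not part of the diff; graph and initialEstimate stand for any NonlinearFactorGraph and Values, mirroring the pose2SLAM test further below):

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

// Sketch only: run the nonlinear CG optimizer on an existing problem to convergence.
gtsam::Values optimizeWithNonlinearCG(const gtsam::NonlinearFactorGraph& graph,
                                      const gtsam::Values& initialEstimate) {
  gtsam::NonlinearOptimizerParams param;
  param.maxIterations = 500;  // nonlinear CG typically needs more iterations than LM
  gtsam::NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
  return optimizer.optimize();  // internally calls nonlinearConjugateGradient<System, Values>
}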
@@ -1,7 +1,7 @@
/**
 * @file GradientDescentOptimizer.cpp
 * @brief
 * @author ydjian
 * @author Yong-Dian Jian
 * @date Jun 11, 2012
 */

@@ -9,75 +9,31 @@

#include <gtsam/base/Manifold.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <boost/tuple/tuple.hpp>

namespace gtsam {

/* an implementation of gradient-descent method using the NLO interface */

class GradientDescentState : public NonlinearOptimizerState {

/** An implementation of the nonlinear cg method using the template below */
class NonlinearConjugateGradientState : public NonlinearOptimizerState {
public:

  typedef NonlinearOptimizerState Base;

  GradientDescentState(const NonlinearFactorGraph& graph, const Values& values)
  NonlinearConjugateGradientState(const NonlinearFactorGraph& graph, const Values& values)
    : Base(graph, values) {}
};

class GradientDescentOptimizer : public NonlinearOptimizer {

public:

  typedef boost::shared_ptr<GradientDescentOptimizer> shared_ptr;
  typedef NonlinearOptimizer Base;
  typedef GradientDescentState States;
  typedef NonlinearOptimizerParams Parameters;

protected:

  Parameters params_;
  States state_;
  Ordering::shared_ptr ordering_;
  VectorValues::shared_ptr gradient_;

public:

  GradientDescentOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params = Parameters())
    : Base(graph), params_(params), state_(graph, initialValues),
      ordering_(initialValues.orderingArbitrary()),
      gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))) {}

  virtual ~GradientDescentOptimizer() {}

  virtual void iterate();

protected:

  virtual const NonlinearOptimizerState& _state() const { return state_; }
  virtual const NonlinearOptimizerParams& _params() const { return params_; }
};


/**
 * An implementation of the nonlinear cg method using the template below
 */

class ConjugateGradientOptimizer {

class NonlinearConjugateGradientOptimizer : public NonlinearOptimizer {
  /* a class for the nonlinearConjugateGradient template */
  class System {

  public:

    typedef Values State;
    typedef VectorValues Gradient;
    typedef NonlinearOptimizerParams Parameters;

  protected:

    NonlinearFactorGraph graph_;
    Ordering ordering_;
    const NonlinearFactorGraph &graph_;
    const Ordering &ordering_;

  public:

    System(const NonlinearFactorGraph &graph, const Ordering &ordering): graph_(graph), ordering_(ordering) {}
    double error(const State &state) const ;
    Gradient gradient(const State &state) const ;
@@ -85,35 +41,32 @@ class ConjugateGradientOptimizer {
  };

public:

  typedef NonlinearOptimizer Base;
  typedef NonlinearConjugateGradientState States;
  typedef NonlinearOptimizerParams Parameters;
  typedef boost::shared_ptr<ConjugateGradientOptimizer> shared_ptr;
  typedef boost::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;

protected:

  NonlinearFactorGraph graph_;
  Values initialEstimate_;
  States state_;
  Parameters params_;
  Ordering::shared_ptr ordering_;
  VectorValues::shared_ptr gradient_;
  bool cg_;

public:

  ConjugateGradientOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
      const Parameters& params = Parameters(), const bool cg = true)
    : graph_(graph), initialEstimate_(initialValues), params_(params),
      ordering_(initialValues.orderingArbitrary()),
      gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))),
      cg_(cg) {}
  NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
      const Parameters& params = Parameters())
    : Base(graph), state_(graph, initialValues), params_(params), ordering_(initialValues.orderingArbitrary()),
      gradient_(new VectorValues(initialValues.zeroVectors(*ordering_))){}

  virtual ~ConjugateGradientOptimizer() {}
  virtual Values optimize () ;
  virtual ~NonlinearConjugateGradientOptimizer() {}
  virtual void iterate();
  virtual const Values& optimize ();
  virtual const NonlinearOptimizerState& _state() const { return state_; }
  virtual const NonlinearOptimizerParams& _params() const { return params_; }
};

/**
 * Implement the golden-section line search algorithm
 */
/** Implement the golden-section line search algorithm */
template <class S, class V, class W>
double lineSearch(const S &system, const V currentValues, const W &gradient) {
@@ -171,18 +124,20 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
 *
 * The last parameter is a switch between gradient-descent and conjugate gradient
 */

template <class S, class V>
V conjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool gradientDescent) {
boost::tuple<V, size_t> nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool singleIteration, const bool gradientDescent = false) {

  GTSAM_CONCEPT_MANIFOLD_TYPE(V);
//  GTSAM_CONCEPT_MANIFOLD_TYPE(V);

  Index iteration = 0;

  // check if we're already close enough
  double currentError = system.error(initial);
  if(currentError <= params.errorTol) {
    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
    if (params.verbosity >= NonlinearOptimizerParams::ERROR){
      std::cout << "Exiting, as error = " << currentError << " < " << params.errorTol << std::endl;
    return initial;
    }
    return boost::tie(initial, iteration);
  }

  V currentValues = initial;
@@ -194,14 +149,12 @@ V conjugateGradient(const S &system, const V &initial, const NonlinearOptimizerP
  double alpha = lineSearch(system, currentValues, direction);
  currentValues = system.advance(prevValues, alpha, direction);
  currentError = system.error(currentValues);
  Index iteration = 0;

  // Maybe show output
  if (params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "Initial error: " << currentError << std::endl;

  // Iterative loop
  do {

    if ( gradientDescent == true) {
      direction = system.gradient(currentValues);
    }
@@ -222,13 +175,14 @@ V conjugateGradient(const S &system, const V &initial, const NonlinearOptimizerP
    // Maybe show output
    if(params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "currentError: " << currentError << std::endl;
  } while( ++iteration < params.maxIterations &&
           !singleIteration &&
           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol, prevError, currentError, params.verbosity));

  // Printing if verbose
  if (params.verbosity >= NonlinearOptimizerParams::ERROR && iteration >= params.maxIterations)
    std::cout << "Terminating because reached maximum iterations" << std::endl;
    std::cout << "nonlinearConjugateGradient: Terminating because reached maximum iterations" << std::endl;

  return currentValues;
  return boost::tie(currentValues, iteration);
}

@@ -0,0 +1,81 @@
/**
 * @file SuccessiveLinearizationOptimizer.cpp
 * @brief
 * @date Jul 24, 2012
 * @author Yong-Dian Jian
 */

#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
#include <gtsam/inference/EliminationTree.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/linear/SimpleSPCGSolver.h>
#include <gtsam/linear/SubgraphSolver.h>
#include <gtsam/linear/VectorValues.h>
#include <boost/shared_ptr.hpp>
#include <stdexcept>

namespace gtsam {

void SuccessiveLinearizationParams::print(const std::string& str) const {
  NonlinearOptimizerParams::print(str);
  switch ( linearSolverType ) {
  case MULTIFRONTAL_CHOLESKY:
    std::cout << " linear solver type: MULTIFRONTAL CHOLESKY\n";
    break;
  case MULTIFRONTAL_QR:
    std::cout << " linear solver type: MULTIFRONTAL QR\n";
    break;
  case SEQUENTIAL_CHOLESKY:
    std::cout << " linear solver type: SEQUENTIAL CHOLESKY\n";
    break;
  case SEQUENTIAL_QR:
    std::cout << " linear solver type: SEQUENTIAL QR\n";
    break;
  case CHOLMOD:
    std::cout << " linear solver type: CHOLMOD\n";
    break;
  case CG:
    std::cout << " linear solver type: CG\n";
    break;
  default:
    std::cout << " linear solver type: (invalid)\n";
    break;
  }

  if(ordering)
    std::cout << " ordering: custom\n";
  else
    std::cout << " ordering: COLAMD\n";

  std::cout.flush();
}

VectorValues solveGaussianFactorGraph(const GaussianFactorGraph &gfg, const SuccessiveLinearizationParams &params) {
  VectorValues delta;
  if ( params.isMultifrontal() ) {
    delta = GaussianJunctionTree(gfg).optimize(params.getEliminationFunction());
  }
  else if ( params.isSequential() ) {
    delta = gtsam::optimize(*EliminationTree<GaussianFactor>::Create(gfg)->eliminate(params.getEliminationFunction()));
  }
  else if ( params.isCG() ) {
    if ( !params.iterativeParams ) throw std::runtime_error("solveGaussianFactorGraph: cg parameter has to be assigned ...");
    if ( boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params.iterativeParams)) {
      SimpleSPCGSolver solver (gfg, *boost::dynamic_pointer_cast<SimpleSPCGSolverParameters>(params.iterativeParams));
      delta = solver.optimize();
    }
    else if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams) ) {
      SubgraphSolver solver (gfg, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams));
      delta = solver.optimize();
    }
    else {
      throw std::runtime_error("solveGaussianFactorGraph: special cg parameter type is not handled in LM solver ...");
    }
  }
  else {
    throw std::runtime_error("solveGaussianFactorGraph: Optimization parameter is invalid");
  }
  return delta;
}

}
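For context, the optimizers shown earlier now reduce to the following pattern (a sketch condensed from the GaussNewton and LM hunks above, not part of the diff; graph_, params_, and current.values are members/locals of the calling optimizer):

// Sketch of a caller, condensed from GaussNewtonOptimizer::iterate() above.
GaussianFactorGraph::shared_ptr linear =
    graph_.linearize(current.values, *params_.ordering);              // linearize at the current estimate
const VectorValues delta = solveGaussianFactorGraph(*linear, params_); // solver chosen by params_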
@@ -40,43 +40,8 @@ public:
  IterativeOptimizationParameters::shared_ptr iterativeParams; ///< The container for iterativeOptimization parameters. used in CG Solvers.

  SuccessiveLinearizationParams() : linearSolverType(MULTIFRONTAL_CHOLESKY) {}

  virtual ~SuccessiveLinearizationParams() {}

  virtual void print(const std::string& str = "") const {
    NonlinearOptimizerParams::print(str);
    switch ( linearSolverType ) {
    case MULTIFRONTAL_CHOLESKY:
      std::cout << " linear solver type: MULTIFRONTAL CHOLESKY\n";
      break;
    case MULTIFRONTAL_QR:
      std::cout << " linear solver type: MULTIFRONTAL QR\n";
      break;
    case SEQUENTIAL_CHOLESKY:
      std::cout << " linear solver type: SEQUENTIAL CHOLESKY\n";
      break;
    case SEQUENTIAL_QR:
      std::cout << " linear solver type: SEQUENTIAL QR\n";
      break;
    case CHOLMOD:
      std::cout << " linear solver type: CHOLMOD\n";
      break;
    case CG:
      std::cout << " linear solver type: CG\n";
      break;
    default:
      std::cout << " linear solver type: (invalid)\n";
      break;
    }

    if(ordering)
      std::cout << " ordering: custom\n";
    else
      std::cout << " ordering: COLAMD\n";

    std::cout.flush();
  }

  inline bool isMultifrontal() const {
    return (linearSolverType == MULTIFRONTAL_CHOLESKY) || (linearSolverType == MULTIFRONTAL_QR);
  }
@@ -93,7 +58,9 @@ public:
    return (linearSolverType == CG);
  }

  GaussianFactorGraph::Eliminate getEliminationFunction() {
  virtual void print(const std::string& str) const;

  GaussianFactorGraph::Eliminate getEliminationFunction() const {
    switch (linearSolverType) {
    case MULTIFRONTAL_CHOLESKY:
    case SEQUENTIAL_CHOLESKY:
@@ -111,4 +78,7 @@ public:
  }
};

/* a wrapper for solving a GaussianFactorGraph according to the parameters */
VectorValues solveGaussianFactorGraph(const GaussianFactorGraph &gfg, const SuccessiveLinearizationParams &params) ;

} /* namespace gtsam */
@@ -6,7 +6,7 @@
 */

#include <gtsam/slam/pose2SLAM.h>
#include <gtsam/nonlinear/GradientDescentOptimizer.h>
#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

#include <CppUnitLite/TestHarness.h>

@@ -51,31 +51,6 @@ boost::tuple<pose2SLAM::Graph, Values> generateProblem() {
  return boost::tie(graph, initialEstimate);
}


/* ************************************************************************* */
TEST(optimize, GradientDescentOptimizer) {

  pose2SLAM::Graph graph ;
  pose2SLAM::Values initialEstimate;

  boost::tie(graph, initialEstimate) = generateProblem();
//  cout << "initial error = " << graph.error(initialEstimate) << endl ;

  // Single Step Optimization using Levenberg-Marquardt
  NonlinearOptimizerParams param;
  param.maxIterations = 500; /* requires a larger number of iterations to converge */
  param.verbosity = NonlinearOptimizerParams::SILENT;

  GradientDescentOptimizer optimizer(graph, initialEstimate, param);
  Values result = optimizer.optimize();
//  cout << "gd1 solver final error = " << graph.error(result) << endl;

  /* the optimality of the solution is not comparable to the */
  DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);

  CHECK(1);
}

/* ************************************************************************* */
TEST(optimize, ConjugateGradientOptimizer) {

@@ -90,8 +65,7 @@ TEST(optimize, ConjugateGradientOptimizer) {
  param.maxIterations = 500; /* requires a larger number of iterations to converge */
  param.verbosity = NonlinearOptimizerParams::SILENT;


  ConjugateGradientOptimizer optimizer(graph, initialEstimate, param, true);
  NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
  Values result = optimizer.optimize();
//  cout << "cg final error = " << graph.error(result) << endl;

@@ -99,32 +73,6 @@ TEST(optimize, ConjugateGradientOptimizer) {
  DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
}

/* ************************************************************************* */
TEST(optimize, GradientDescentOptimizer2) {

  pose2SLAM::Graph graph ;
  pose2SLAM::Values initialEstimate;

  boost::tie(graph, initialEstimate) = generateProblem();
//  cout << "initial error = " << graph.error(initialEstimate) << endl ;

  // Single Step Optimization using Levenberg-Marquardt
  NonlinearOptimizerParams param;
  param.maxIterations = 500; /* requires a larger number of iterations to converge */
  param.verbosity = NonlinearOptimizerParams::SILENT;


  ConjugateGradientOptimizer optimizer(graph, initialEstimate, param, false);
  Values result = optimizer.optimize();
//  cout << "gd2 solver final error = " << graph.error(result) << endl;

  /* the optimality of the solution is not comparable to the */
  DOUBLES_EQUAL(0.0, graph.error(result), 1e-2);
}



/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */