Made NonlinearConjugateGradientOptimizer compile
parent 55ce9eed1d
commit 90b1349f23
@@ -5,13 +5,10 @@
  * @date Jun 11, 2012
  */
 
-#if 0
-
 #include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
-#include <gtsam/nonlinear/OrderingOrdered.h>
 #include <gtsam/nonlinear/Values.h>
-#include <gtsam/linear/GaussianFactorGraphOrdered.h>
-#include <gtsam/linear/VectorValuesOrdered.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/VectorValues.h>
 
 #include <cmath>
 
@@ -21,18 +18,10 @@ namespace gtsam {
 
 /* Return the gradient vector of a nonlinear factor given a linearization point and a variable ordering
  * Can be moved to NonlinearFactorGraph.h if desired */
-void gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values, const OrderingOrdered &ordering, VectorValuesOrdered &g) {
+VectorValues gradientInPlace(const NonlinearFactorGraph &nfg, const Values &values) {
   // Linearize graph
-  GaussianFactorGraphOrdered::shared_ptr linear = nfg.linearize(values, ordering);
-  FactorGraphOrdered<JacobianFactorOrdered> jfg; jfg.reserve(linear->size());
-  BOOST_FOREACH(const GaussianFactorGraphOrdered::sharedFactor& factor, *linear) {
-    if(boost::shared_ptr<JacobianFactorOrdered> jf = boost::dynamic_pointer_cast<JacobianFactorOrdered>(factor))
-      jfg.push_back((jf));
-    else
-      jfg.push_back(boost::make_shared<JacobianFactorOrdered>(*factor));
-  }
-  // compute the gradient direction
-  gradientAtZero(jfg, g);
+  GaussianFactorGraph::shared_ptr linear = nfg.linearize(values);
+  return linear->gradientAtZero();
 }
 
 double NonlinearConjugateGradientOptimizer::System::error(const State &state) const {
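
Note: the simplification above leans on the unordered API, where NonlinearFactorGraph::linearize(values) returns a GaussianFactorGraph whose gradientAtZero() is the gradient of the linearized error at the linearization point. A minimal standalone sketch of the same pattern (the helper name is illustrative and not part of this commit):

#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

// Illustrative helper: gradient of the nonlinear error at `values`,
// obtained by linearizing and asking the linear graph for its gradient at zero.
gtsam::VectorValues exampleGradient(const gtsam::NonlinearFactorGraph& graph,
                                    const gtsam::Values& values) {
  gtsam::GaussianFactorGraph::shared_ptr linear = graph.linearize(values);
  return linear->gradientAtZero();
}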
@@ -40,29 +29,26 @@ double NonlinearConjugateGradientOptimizer::System::error(const State &state) const {
 }
 
 NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(const State &state) const {
-  Gradient result = state.zeroVectors(ordering_);
-  gradientInPlace(graph_, state, ordering_, result);
-  return result;
+  return gradientInPlace(graph_, state);
 }
 NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(const State &current, const double alpha, const Gradient &g) const {
   Gradient step = g;
-  scal(alpha, step);
-  return current.retract(step, ordering_);
+  step *= alpha;
+  return current.retract(step);
 }
 
 void NonlinearConjugateGradientOptimizer::iterate() {
   size_t dummy ;
-  boost::tie(state_.values, dummy) = nonlinearConjugateGradient<System, Values>(System(graph_, *ordering_), state_.values, params_, true /* single iterations */);
+  boost::tie(state_.values, dummy) = nonlinearConjugateGradient<System, Values>(System(graph_), state_.values, params_, true /* single iterations */);
   ++state_.iterations;
   state_.error = graph_.error(state_.values);
 }
 
 const Values& NonlinearConjugateGradientOptimizer::optimize() {
-  boost::tie(state_.values, state_.iterations) = nonlinearConjugateGradient<System, Values>(System(graph_, *ordering_), state_.values, params_, false /* up to convergent */);
+  boost::tie(state_.values, state_.iterations) = nonlinearConjugateGradient<System, Values>(System(graph_), state_.values, params_, false /* up to convergent */);
   state_.error = graph_.error(state_.values);
   return state_.values;
 }
 
 } /* namespace gtsam */
 
-#endif
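
Note: the new advance() body is the retract-based line-search step: scale the search direction by the step size and apply it to the current estimate on the manifold. A minimal sketch of that step in isolation (the helper name is illustrative, not from this commit):

#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/VectorValues.h>

// Illustrative update step: move `current` along direction `g` by step size `alpha`.
gtsam::Values exampleAdvance(const gtsam::Values& current,
                             double alpha,
                             const gtsam::VectorValues& g) {
  gtsam::VectorValues step = g;
  step *= alpha;                  // scale the direction (replaces the old scal(alpha, step))
  return current.retract(step);   // apply the tangent-space step on the manifold
}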
@@ -7,8 +7,6 @@
 
 #pragma once
 
-#if 0
-
 #include <gtsam/base/Manifold.h>
 #include <gtsam/nonlinear/NonlinearOptimizer.h>
 #include <boost/tuple/tuple.hpp>
@@ -28,15 +26,14 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer {
   class System {
   public:
     typedef Values State;
-    typedef VectorValuesOrdered Gradient;
+    typedef VectorValues Gradient;
     typedef NonlinearOptimizerParams Parameters;
 
   protected:
     const NonlinearFactorGraph &graph_;
-    const OrderingOrdered &ordering_;
 
   public:
-    System(const NonlinearFactorGraph &graph, const OrderingOrdered &ordering): graph_(graph), ordering_(ordering) {}
+    System(const NonlinearFactorGraph &graph): graph_(graph) {}
     double error(const State &state) const ;
     Gradient gradient(const State &state) const ;
     State advance(const State &current, const double alpha, const Gradient &g) const ;
@@ -51,15 +48,12 @@ public:
 protected:
   States state_;
   Parameters params_;
-  OrderingOrdered::shared_ptr ordering_;
-  VectorValuesOrdered::shared_ptr gradient_;
 
 public:
 
   NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const Parameters& params = Parameters())
-    : Base(graph), state_(graph, initialValues), params_(params), ordering_(initialValues.orderingArbitrary()),
-      gradient_(new VectorValuesOrdered(initialValues.zeroVectors(*ordering_))){}
+    : Base(graph), state_(graph, initialValues), params_(params) {}
 
   virtual ~NonlinearConjugateGradientOptimizer() {}
   virtual void iterate();
@@ -189,4 +183,3 @@ boost::tuple<V, size_t> nonlinearConjugateGradient(const S &system, const V &ini
 
 }
 
-#endif
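
Note: with the ordering and cached-gradient members removed, the optimizer is constructed from just a graph and initial values (parameters are optional). A hedged usage sketch based only on the signatures visible in this header; the driver function name is illustrative:

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

// Illustrative driver: graph and initialValues are assumed to be built elsewhere.
gtsam::Values exampleOptimize(const gtsam::NonlinearFactorGraph& graph,
                              const gtsam::Values& initialValues) {
  gtsam::NonlinearConjugateGradientOptimizer optimizer(graph, initialValues);  // default Parameters()
  return optimizer.optimize();  // runs until convergence and returns the final Values
}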
@@ -31,20 +31,20 @@ boost::tuple<NonlinearFactorGraph, Values> generateProblem() {
   // 2a. Add Gaussian prior
   Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
   SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1));
-  graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));
+  graph += PriorFactor<Pose2>(1, priorMean, priorNoise);
 
   // 2b. Add odometry factors
   SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1));
-  graph.add(BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0 ), odometryNoise));
-  graph.add(BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise));
-  graph.add(BetweenFactor<Pose2>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise));
-  graph.add(BetweenFactor<Pose2>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise));
+  graph += BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0 ), odometryNoise);
+  graph += BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
+  graph += BetweenFactor<Pose2>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
+  graph += BetweenFactor<Pose2>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
 
   // 2c. Add pose constraint
   SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1));
-  graph.add(BetweenFactor<Pose2>(5, 2, Pose2(2.0, 0.0, M_PI_2), constraintUncertainty));
+  graph += BetweenFactor<Pose2>(5, 2, Pose2(2.0, 0.0, M_PI_2), constraintUncertainty);
 
-  // 3. Create the data structure to hold the initialEstimate estinmate to the solution
+  // 3. Create the data structure to hold the initialEstimate estimate to the solution
   Values initialEstimate;
   Pose2 x1(0.5, 0.0, 0.2 ); initialEstimate.insert(1, x1);
   Pose2 x2(2.3, 0.1,-0.2 ); initialEstimate.insert(2, x2);
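
Note: generateProblem() builds the small Pose2 example with the operator form (graph += factor) in place of graph.add(factor). A hedged end-to-end sketch of feeding that problem to the optimizer; the driver function is illustrative and assumes generateProblem() is visible in the same translation unit:

#include <iostream>
#include <boost/tuple/tuple.hpp>
#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

// Declared in the example above; returns the factor graph and the initial estimate.
boost::tuple<gtsam::NonlinearFactorGraph, gtsam::Values> generateProblem();

void exampleSolve() {
  gtsam::NonlinearFactorGraph graph;
  gtsam::Values initialEstimate;
  boost::tie(graph, initialEstimate) = generateProblem();

  gtsam::NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate);
  gtsam::Values result = optimizer.optimize();
  std::cout << "final error: " << graph.error(result) << std::endl;
}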