Merge branch 'master' into retraction_name

parent 2b9a3db085
commit 1ec7d7e86e

gtsam.h: 22 lines changed

@@ -157,18 +157,18 @@ class PlanarSLAMGraph {
 
   void print(string s) const;
 
-  double error(const PlanarSLAMValues& c) const;
-  Ordering* orderingCOLAMD(const PlanarSLAMValues& config) const;
-  GaussianFactorGraph* linearize(const PlanarSLAMValues& config,
+  double error(const PlanarSLAMValues& values) const;
+  Ordering* orderingCOLAMD(const PlanarSLAMValues& values) const;
+  GaussianFactorGraph* linearize(const PlanarSLAMValues& values,
       const Ordering& ordering) const;
 
-  void addPrior(int i, const Pose2& p, const SharedNoiseModel& model);
-  void addPoseConstraint(int i, const Pose2& p);
-  void addOdometry(int i, int j, const Pose2& z, const SharedNoiseModel& model);
-  void addBearing(int i, int j, const Rot2& z, const SharedNoiseModel& model);
-  void addRange(int i, int j, double z, const SharedNoiseModel& model);
-  void addBearingRange(int i, int j, const Rot2& z1, double z2,
-      const SharedNoiseModel& model);
+  void addPrior(int key, const Pose2& pose, const SharedNoiseModel& noiseModel);
+  void addPoseConstraint(int key, const Pose2& pose);
+  void addOdometry(int key1, int key2, const Pose2& odometry, const SharedNoiseModel& noiseModel);
+  void addBearing(int poseKey, int pointKey, const Rot2& bearing, const SharedNoiseModel& noiseModel);
+  void addRange(int poseKey, int pointKey, double range, const SharedNoiseModel& noiseModel);
+  void addBearingRange(int poseKey, int pointKey, const Rot2& bearing, double range,
+      const SharedNoiseModel& noiseModel);
   PlanarSLAMValues* optimize_(const PlanarSLAMValues& initialEstimate);
 };
 
@@ -176,5 +176,5 @@ class PlanarSLAMOdometry {
   PlanarSLAMOdometry(int key1, int key2, const Pose2& measured,
       const SharedNoiseModel& model);
   void print(string s) const;
-  GaussianFactor* linearize(const PlanarSLAMValues& c, const Ordering& ordering) const;
+  GaussianFactor* linearize(const PlanarSLAMValues& center, const Ordering& ordering) const;
 };

@@ -43,6 +43,9 @@ public:
   /** Constructor from size */
   FastVector(size_t size) : Base(size) {}
 
+  /** Constructor from size and initial values */
+  FastVector(size_t size, const VALUE& initial) : Base(size, initial) {}
+
   /** Constructor from a range, passes through to base class */
   template<typename INPUTITERATOR>
   FastVector(INPUTITERATOR first, INPUTITERATOR last) : Base(first, last) {}

@@ -22,10 +22,14 @@
 #include <vector>
 #include <stdexcept>
 #include <deque>
+#include <boost/function.hpp>
+#include <boost/shared_ptr.hpp>
 
 #include <gtsam/base/types.h>
+#include <gtsam/base/FastVector.h>
 #include <gtsam/inference/FactorGraph.h>
 #include <gtsam/inference/BayesNet.h>
+#include <gtsam/linear/VectorValues.h>
 
 namespace gtsam {
 
@@ -351,4 +355,31 @@ namespace gtsam {
 
 }; // BayesTree
 
+/* ************************************************************************* */
+template<class CONDITIONAL>
+void _BayesTree_dim_adder(
+    std::vector<size_t>& dims,
+    const typename BayesTree<CONDITIONAL>::sharedClique& clique) {
+
+  if(clique) {
+    // Add dims from this clique
+    for(typename CONDITIONAL::const_iterator it = (*clique)->beginFrontals(); it != (*clique)->endFrontals(); ++it)
+      dims[*it] = (*clique)->dim(it);
+
+    // Traverse children
+    BOOST_FOREACH(const typename BayesTree<CONDITIONAL>::sharedClique& child, clique->children()) {
+      _BayesTree_dim_adder<CONDITIONAL>(dims, child);
+    }
+  }
+}
+
+/* ************************************************************************* */
+template<class CONDITIONAL>
+boost::shared_ptr<VectorValues> allocateVectorValues(const BayesTree<CONDITIONAL>& bt) {
+  std::vector<size_t> dimensions(bt.nodes().size(), 0);
+  _BayesTree_dim_adder<CONDITIONAL>(dimensions, bt.root());
+  return boost::shared_ptr<VectorValues>(new VectorValues(dimensions));
+}
+
 } /// namespace gtsam

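The new allocateVectorValues helper sizes a VectorValues from the frontal dimensions stored in the clique tree. A hedged usage sketch (`bt` is assumed to be a BayesTree<GaussianConditional> produced by elimination elsewhere; not part of this commit):

// Sketch: allocate a VectorValues whose per-variable block sizes match the tree,
// then fill it with the RHS for backsubstitution (treeRHS is added below).
boost::shared_ptr<VectorValues> x = allocateVectorValues(bt);
treeRHS(bt.root(), *x);
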
@@ -24,6 +24,7 @@
 
 #include <gtsam/inference/FactorGraph.h>
 #include <gtsam/inference/graph-inl.h>
+#include <gtsam/inference/BayesTree-inl.h>
 #include <gtsam/base/DSF.h>
 
 #include <boost/foreach.hpp>

@@ -124,5 +125,30 @@ namespace gtsam {
     return typename DERIVED::shared_ptr(new DERIVED(keysBegin, keysEnd));
   }
 
+  /* ************************************************************************* */
+  template<class FACTOR, class CONDITIONAL>
+  void _FactorGraph_BayesTree_adder(
+      vector<typename FactorGraph<FACTOR>::sharedFactor>& factors,
+      const typename BayesTree<CONDITIONAL>::sharedClique& clique) {
+
+    if(clique) {
+      // Add factor from this clique
+      factors.push_back((*clique)->toFactor());
+
+      // Traverse children
+      BOOST_FOREACH(const typename BayesTree<CONDITIONAL>::sharedClique& child, clique->children()) {
+        _FactorGraph_BayesTree_adder<FACTOR,CONDITIONAL>(factors, child);
+      }
+    }
+  }
+
+  /* ************************************************************************* */
+  template<class FACTOR>
+  template<class CONDITIONAL>
+  FactorGraph<FACTOR>::FactorGraph(const BayesTree<CONDITIONAL>& bayesTree) {
+    factors_.reserve(bayesTree.size());
+    _FactorGraph_BayesTree_adder<FACTOR,CONDITIONAL>(factors_, bayesTree.root());
+  }
+
   /* ************************************************************************* */
 } // namespace gtsam

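This constructor converts an eliminated Bayes tree back into a factor graph by walking the cliques and re-expressing each clique conditional as a factor via toFactor(). A hedged sketch of the round trip (`bayesTree` assumed built by a solver elsewhere):

// Sketch: recover a factor graph from an eliminated Bayes tree.
FactorGraph<GaussianFactor> factors(bayesTree);
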
@@ -32,6 +32,9 @@
 
 namespace gtsam {
 
+  // Forward declarations
+  template<class CONDITIONAL> class BayesTree;
+
   /**
    * A factor graph is a bipartite graph with factor nodes connected to variable nodes.
    * In this class, however, only factor nodes are kept around.

@@ -74,6 +77,10 @@ namespace gtsam {
     template<class CONDITIONAL>
     FactorGraph(const BayesNet<CONDITIONAL>& bayesNet);
 
+    /** convert from Bayes tree */
+    template<class CONDITIONAL>
+    FactorGraph(const BayesTree<CONDITIONAL>& bayesTree);
+
     /** convert from a derived type */
     template<class DERIVEDFACTOR>
     FactorGraph(const FactorGraph<DERIVEDFACTOR>& factors) { factors_.insert(end(), factors.begin(), factors.end()); }

@@ -205,7 +212,7 @@ namespace gtsam {
   template<class FACTORGRAPH>
   FACTORGRAPH combine(const FACTORGRAPH& fg1, const FACTORGRAPH& fg2);
 
-  /**
+  /*
   * These functions are defined here because they are templated on an
   * additional parameter. Putting them in the -inl.h file would mean these
   * would have to be explicitly instantiated for any possible derived factor

@@ -25,8 +25,12 @@
 #include <gtsam/base/FastVector.h>
 #include <gtsam/linear/HessianFactor.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
-#include <gtsam/inference/VariableSlots.h>
+#include <gtsam/inference/BayesTree-inl.h>
 #include <gtsam/inference/FactorGraph-inl.h>
+#include <gtsam/inference/VariableSlots.h>
+#include <gtsam/base/debug.h>
+#include <gtsam/base/timing.h>
+#include <gtsam/base/cholesky.h>
 
 using namespace std;
 using namespace gtsam;

@@ -36,9 +40,10 @@ namespace gtsam {
   INSTANTIATE_FACTOR_GRAPH(GaussianFactor);
 
   /* ************************************************************************* */
-  GaussianFactorGraph::GaussianFactorGraph(const GaussianBayesNet& CBN) :
-    FactorGraph<GaussianFactor> (CBN) {
-  }
+  GaussianFactorGraph::GaussianFactorGraph(const GaussianBayesNet& CBN) : Base(CBN) {}
+
+  /* ************************************************************************* */
+  GaussianFactorGraph::GaussianFactorGraph(const BayesTree<GaussianConditional>& GBT) : Base(GBT) {}
 
   /* ************************************************************************* */
   GaussianFactorGraph::Keys GaussianFactorGraph::keys() const {

@@ -72,6 +72,11 @@ namespace gtsam {
      */
     GaussianFactorGraph(const GaussianBayesNet& CBN);
 
+    /**
+     * Constructor that receives a BayesTree and returns a GaussianFactorGraph
+     */
+    GaussianFactorGraph(const BayesTree<GaussianConditional>& GBT);
+
     /** Constructor from a factor graph of GaussianFactor or a derived type */
     template<class DERIVEDFACTOR>
     GaussianFactorGraph(const FactorGraph<DERIVEDFACTOR>& fg) {

@@ -51,32 +51,44 @@ Matrix GaussianISAM::marginalCovariance(Index j) const {
 }
 
 /* ************************************************************************* */
-void optimize(const GaussianISAM::sharedClique& clique, VectorValues& result) {
+void optimize(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result) {
   // parents are assumed to already be solved and available in result
   // RHS for current conditional should already be in place in result
   clique->conditional()->solveInPlace(result);
 
-  BOOST_FOREACH(const GaussianISAM::sharedClique& child, clique->children_)
+  BOOST_FOREACH(const BayesTree<GaussianConditional>::sharedClique& child, clique->children_)
     optimize(child, result);
 }
 
 /* ************************************************************************* */
-void GaussianISAM::treeRHS(const GaussianISAM::sharedClique& clique, VectorValues& result) {
+void treeRHS(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result) {
   clique->conditional()->rhs(result);
-  BOOST_FOREACH(const GaussianISAM::sharedClique& child, clique->children_)
+  BOOST_FOREACH(const BayesTree<GaussianConditional>::sharedClique& child, clique->children_)
     treeRHS(child, result);
 }
 
 /* ************************************************************************* */
-VectorValues GaussianISAM::rhs(const GaussianISAM& bayesTree) {
-  VectorValues result(bayesTree.dims_); // allocate
+VectorValues rhs(const BayesTree<GaussianConditional>& bayesTree, boost::optional<const GaussianISAM::Dims&> dims) {
+  VectorValues result;
+  if(dims)
+    result = VectorValues(*dims);
+  else
+    result = *allocateVectorValues(bayesTree); // allocate
   treeRHS(bayesTree.root(), result); // recursively fill
   return result;
 }
 
 /* ************************************************************************* */
-VectorValues optimize(const GaussianISAM& bayesTree) {
-  VectorValues result = GaussianISAM::rhs(bayesTree);
+VectorValues optimize(const GaussianISAM& isam) {
+  VectorValues result = rhs(isam, isam.dims_);
+  // starting from the root, call optimize on each conditional
+  optimize(isam.root(), result);
+  return result;
+}
+
+/* ************************************************************************* */
+VectorValues optimize(const BayesTree<GaussianConditional>& bayesTree) {
+  VectorValues result = rhs(bayesTree);
   // starting from the root, call optimize on each conditional
   optimize(bayesTree.root(), result);
   return result;

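A hedged sketch of the two entry points that result from this change (`isam` and `bt` are assumed to come from an ISAM update or a multifrontal elimination elsewhere):

// Sketch: solve by backsubstitution through the clique tree.
VectorValues x1 = optimize(isam); // GaussianISAM overload, reuses the cached isam.dims_
VectorValues x2 = optimize(bt);   // BayesTree<GaussianConditional> overload, allocates via allocateVectorValues
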
@@ -19,6 +19,8 @@
 
 #pragma once
 
+#include <boost/optional.hpp>
+
 #include <gtsam/inference/ISAM.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/GaussianJunctionTree.h>

@@ -32,6 +34,8 @@ class GaussianISAM : public ISAM<GaussianConditional> {
 
 public:
 
+  typedef std::deque<size_t, boost::fast_pool_allocator<size_t> > Dims;
+
   /** Create an empty Bayes Tree */
   GaussianISAM() : Super() {}
 
@@ -84,20 +88,21 @@ public:
   /** return the conditional P(S|Root) on the separator given the root */
   static BayesNet<GaussianConditional> shortcut(sharedClique clique, sharedClique root);
 
-  /** load a VectorValues with the RHS of the system for backsubstitution */
-  static VectorValues rhs(const GaussianISAM& bayesTree);
-
-protected:
-
-  /** recursively load RHS for system */
-  static void treeRHS(const GaussianISAM::sharedClique& clique, VectorValues& result);
-
 }; // \class GaussianISAM
 
+/** load a VectorValues with the RHS of the system for backsubstitution */
+VectorValues rhs(const BayesTree<GaussianConditional>& bayesTree, boost::optional<const GaussianISAM::Dims&> dims = boost::none);
+
+/** recursively load RHS for system */
+void treeRHS(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result);
+
 // recursively optimize this conditional and all subtrees
-void optimize(const GaussianISAM::sharedClique& clique, VectorValues& result);
+void optimize(const BayesTree<GaussianConditional>::sharedClique& clique, VectorValues& result);
 
 // optimize the BayesTree, starting from the root
-VectorValues optimize(const GaussianISAM& bayesTree);
+VectorValues optimize(const GaussianISAM& isam);
+
+// optimize the BayesTree, starting from the root
+VectorValues optimize(const BayesTree<GaussianConditional>& bayesTree);
 
 } // \namespace gtsam

@@ -56,11 +56,11 @@ namespace gtsam {
  *
  * Letting \f$ h(x) \f$ be a \a linear measurement prediction function, \f$ z \f$ be
  * the actual observed measurement, the residual is
- * \f[ f(x) = h(x) - z \text{.} \f]
+ * \f[ f(x) = h(x) - z . \f]
  * If we expect noise with diagonal covariance matrix \f$ \Sigma \f$ on this
  * measurement, then the negative log-likelihood of the Gaussian induced by this
  * measurement model is
- * \f[ E(x) = \frac{1}{2} (h(x) - z)^T \Sigma^{-1} (h(x) - z) \text. \f]
+ * \f[ E(x) = \frac{1}{2} (h(x) - z)^T \Sigma^{-1} (h(x) - z) . \f]
  * Because \f$ h(x) \f$ is linear, we can write it as
  * \f[ h(x) = Ax + e \f]
  * and thus we have

@@ -75,7 +75,7 @@ namespace gtsam {
  * for example, for a 2-way factor, the constructor would accept \f$ A1 \f$ and \f$ A2 \f$,
  * as well as the variable indices \f$ j1 \f$ and \f$ j2 \f$
  * and the negative log-likelihood represented by this factor would be
- * \f[ E(x) = \frac{1}{2} (A_1 x_{j1} + A_2 x_{j2} - b)^T \Sigma^{-1} (A_1 x_{j1} + A_2 x_{j2} - b) \text{.} \f]
+ * \f[ E(x) = \frac{1}{2} (A_1 x_{j1} + A_2 x_{j2} - b)^T \Sigma^{-1} (A_1 x_{j1} + A_2 x_{j2} - b) . \f]
  */
 class JacobianFactor : public GaussianFactor {
 public:

@@ -0,0 +1,7 @@
+/**
+ * @file DoglegOptimizer-inl.h
+ * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm
+ * @author Richard Roberts
+ */
+
+#pragma once

@@ -0,0 +1,67 @@
+/**
+ * @file DoglegOptimizer.h
+ * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm
+ * @author Richard Roberts
+ */
+
+#pragma once
+
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+
+namespace gtsam {
+
+/**
+ * A class to perform nonlinear optimization using Powell's dogleg algorithm.
+ * This class is functional, meaning every method is \c const, and returns a new
+ * copy of the class.
+ *
+ * \tparam VALUES The LieValues or TupleValues type to hold the values to be
+ * estimated.
+ *
+ * \tparam GAUSSIAN_SOLVER The linear solver to use at each iteration,
+ * currently either GaussianSequentialSolver or GaussianMultifrontalSolver.
+ * The latter is typically faster, especially for non-trivial problems.
+ */
+template<class VALUES, class GAUSSIAN_SOLVER>
+class DoglegOptimizer {
+
+protected:
+
+  typedef DoglegOptimizer<VALUES, GAUSSIAN_SOLVER> This; ///< Typedef to this class
+
+  const sharedGraph graph_;
+  const sharedValues values_;
+  const double error_;
+
+public:
+
+  typedef VALUES ValuesType; ///< Typedef of the VALUES template parameter
+  typedef GAUSSIAN_SOLVER SolverType; ///< Typedef of the GAUSSIAN_SOLVER template parameter
+  typedef NonlinearFactorGraph<VALUES> GraphType; ///< A nonlinear factor graph templated on ValuesType
+
+  typedef boost::shared_ptr<const GraphType> sharedGraph; ///< A shared_ptr to GraphType
+  typedef boost::shared_ptr<const ValuesType> sharedValues; ///< A shared_ptr to ValuesType
+
+
+  /**
+   * Construct a DoglegOptimizer from the factor graph to optimize and the
+   * initial estimate of the variable values, using the default variable
+   * ordering method, currently COLAMD.
+   * @param graph The factor graph to optimize
+   * @param initialization An initial estimate of the variable values
+   */
+  DoglegOptimizer(sharedGraph graph, sharedValues initialization);
+
+  /**
+   * Construct a DoglegOptimizer from the factor graph to optimize and the
+   * initial estimate of the variable values, using the default variable
+   * ordering method, currently COLAMD. (this non-shared-pointer version
+   * incurs a performance hit for copying, see DoglegOptimizer(sharedGraph, sharedValues)).
+   * @param graph The factor graph to optimize
+   * @param initialization An initial estimate of the variable values
+   */
+  DoglegOptimizer(const GraphType& graph, const ValuesType& initialization);
+
+};
+
+}

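Only the constructors are declared at this stage. A hedged instantiation sketch (MyValues, graph, and initialEstimate are hypothetical placeholders, not names from the commit):

// Sketch: instantiate the optimizer for a concrete VALUES type and linear solver.
typedef DoglegOptimizer<MyValues, GaussianSequentialSolver> Optimizer;
Optimizer optimizer(boost::make_shared<const Optimizer::GraphType>(graph),
                    boost::make_shared<const MyValues>(initialEstimate));
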
@@ -0,0 +1,73 @@
+/**
+ * @file DoglegOptimizerImpl.cpp
+ * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm (detail implementation)
+ * @author Richard Roberts
+ */
+
+#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
+
+namespace gtsam {
+/* ************************************************************************* */
+VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(
+    double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+
+  // Get magnitude of each update and find out which segment Delta falls in
+  assert(Delta >= 0.0);
+  double DeltaSq = Delta*Delta;
+  double x_u_norm_sq = x_u.vector().squaredNorm();
+  double x_n_norm_sq = x_n.vector().squaredNorm();
+  cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
+  if(DeltaSq < x_u_norm_sq) {
+    // Trust region is smaller than steepest descent update
+    VectorValues x_d = VectorValues::SameStructure(x_u);
+    x_d.vector() = x_u.vector() * sqrt(DeltaSq / x_u_norm_sq);
+    cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
+    return x_d;
+  } else if(DeltaSq < x_n_norm_sq) {
+    // Trust region boundary is between steepest descent point and Newton's method point
+    return ComputeBlend(Delta, x_u, x_n);
+  } else {
+    assert(DeltaSq >= x_n_norm_sq);
+    cout << "In pure Newton's method region" << endl;
+    // Trust region is larger than Newton's method point
+    return x_n;
+  }
+}
+
+/* ************************************************************************* */
+VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+
+  // See doc/trustregion.lyx or doc/trustregion.pdf
+
+  // Compute inner products
+  const double un = dot(x_u, x_n);
+  const double uu = dot(x_u, x_u);
+  const double nn = dot(x_n, x_n);
+
+  // Compute quadratic formula terms
+  const double a = uu - 2.*un + nn;
+  const double b = 2. * (un - uu);
+  const double c = uu - Delta*Delta;
+  double sqrt_b_m4ac = sqrt(b*b - 4*a*c);
+
+  // Compute blending parameter
+  double tau1 = (-b + sqrt_b_m4ac) / (2.*a);
+  double tau2 = (-b - sqrt_b_m4ac) / (2.*a);
+
+  double tau;
+  if(0.0 <= tau1 && tau1 <= 1.0) {
+    assert(!(0.0 <= tau2 && tau2 <= 1.0));
+    tau = tau1;
+  } else {
+    assert(0.0 <= tau2 && tau2 <= 1.0);
+    tau = tau2;
+  }
+
+  // Compute blended point
+  cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
+  VectorValues blend = VectorValues::SameStructure(x_u);
+  blend.vector() = (1. - tau) * x_u.vector() + tau * x_n.vector();
+  return blend;
+}
+
+}

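To see the blend quadratic in action, here is a small standalone check (plain C++, not part of the commit; the numbers are chosen so the blended step lands exactly on the trust-region boundary):

// Standalone numerical check of the ComputeBlend quadratic (no GTSAM types).
#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  // steepest-descent step x_u = (1,0), Newton step x_n = (3,4), radius Delta = 2
  const double xu[2] = {1.0, 0.0}, xn[2] = {3.0, 4.0}, Delta = 2.0;

  // Same quadratic as ComputeBlend: ||(1-tau) x_u + tau x_n||^2 = Delta^2
  const double un = xu[0]*xn[0] + xu[1]*xn[1];
  const double uu = xu[0]*xu[0] + xu[1]*xu[1];
  const double nn = xn[0]*xn[0] + xn[1]*xn[1];
  const double a = uu - 2.0*un + nn, b = 2.0*(un - uu), c = uu - Delta*Delta;
  const double disc = std::sqrt(b*b - 4.0*a*c);
  const double tau1 = (-b + disc) / (2.0*a), tau2 = (-b - disc) / (2.0*a);
  const double tau = (0.0 <= tau1 && tau1 <= 1.0) ? tau1 : tau2;  // tau = 0.3 here

  const double bx = (1.0-tau)*xu[0] + tau*xn[0];  // blended point
  const double by = (1.0-tau)*xu[1] + tau*xn[1];
  std::printf("tau = %g, |blend| = %g\n", tau, std::sqrt(bx*bx + by*by));
  assert(std::fabs(std::sqrt(bx*bx + by*by) - Delta) < 1e-12);  // on the boundary
  return 0;
}
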
@@ -0,0 +1,251 @@
+/**
+ * @file DoglegOptimizerImpl.h
+ * @brief Nonlinear factor graph optimizer using Powell's Dogleg algorithm (detail implementation)
+ * @author Richard Roberts
+ */
+#pragma once
+
+#include <gtsam/linear/GaussianBayesNet.h>
+#include <gtsam/linear/GaussianISAM.h> // To get optimize(BayesTree<GaussianConditional>)
+#include <gtsam/nonlinear/NonlinearFactorGraph-inl.h>
+#include <gtsam/nonlinear/Ordering.h>
+
+namespace gtsam {
+
+/** This class contains the implementation of the Dogleg algorithm. It is used
+ * by DoglegOptimizer and can be used to easily put together custom versions of
+ * Dogleg. Each function is well-documented and unit-tested. The notation
+ * here matches that in "trustregion.pdf" in gtsam_experimental/doc, see this
+ * file for further explanation of the computations performed by this class.
+ *
+ * \tparam VALUES The LieValues or TupleValues type to hold the values to be
+ * estimated.
+ *
+ * \tparam GAUSSIAN_SOLVER The linear solver to use at each iteration,
+ * currently either GaussianSequentialSolver or GaussianMultifrontalSolver.
+ * The latter is typically faster, especially for non-trivial problems.
+ */
+struct DoglegOptimizerImpl {
+
+  struct IterationResult {
+    double Delta;
+    VectorValues dx_d;
+    double f_error;
+  };
+
+  enum TrustRegionAdaptationMode {
+    SEARCH_EACH_ITERATION,
+    ONE_STEP_PER_ITERATION
+  };
+
+  /**
+   * Compute the update point for one iteration of the Dogleg algorithm, given
+   * an initial trust region radius \f$ \Delta \f$. The trust region radius is
+   * adapted based on the error of a NonlinearFactorGraph \f$ f(x) \f$, and
+   * depending on the update mode \c mode.
+   *
+   * The update is computed using a quadratic approximation \f$ M(\delta x) \f$
+   * of an original nonlinear error function (a NonlinearFactorGraph) \f$ f(x) \f$.
+   * The quadratic approximation is represented as a GaussianBayesNet \bayesNet, which is
+   * obtained by eliminating a GaussianFactorGraph resulting from linearizing
+   * the nonlinear factor graph \f$ f(x) \f$. Thus, \f$ M(\delta x) \f$ is
+   * \f[
+   * M(\delta x) = (R \delta x - d)^T (R \delta x - d)
+   * \f]
+   * where \f$ R \f$ and \f$ d \f$ together are a Bayes' net or Bayes' tree.
+   * \f$ R \f$ is upper-triangular and \f$ d \f$ is a vector, in GTSAM represented
+   * as a BayesNet<GaussianConditional> (GaussianBayesNet) or
+   * BayesTree<GaussianConditional>, containing GaussianConditional s.
+   *
+   * @tparam M The type of the Bayes' net or tree, currently
+   * either BayesNet<GaussianConditional> (or GaussianBayesNet) or BayesTree<GaussianConditional>.
+   * @tparam F For normal usage this will be NonlinearFactorGraph<VALUES>.
+   * @tparam VALUES The LieValues or TupleValues to pass to F::error() to evaluate
+   * the error function.
+   * @param initialDelta The initial trust region radius.
+   * @param Rd The Bayes' net or tree as described above.
+   * @param f The original nonlinear factor graph with which to evaluate the
+   * accuracy of \f$ M(\delta x) \f$ to adjust \f$ \Delta \f$.
+   * @param x0 The linearization point about which \bayesNet was created
+   * @param ordering The variable ordering used to create \bayesNet
+   * @param f_error The result of <tt>f.error(x0)</tt>.
+   * @return An IterationResult containing the new \c Delta, the linear
+   * update \c dx_d, and the resulting nonlinear error \c f_error.
+   */
+  template<class M, class F, class VALUES>
+  static IterationResult Iterate(
+      double Delta, TrustRegionAdaptationMode mode, const M& Rd,
+      const F& f, const VALUES& x0, const Ordering& ordering, double f_error);
+
+  /**
+   * Compute the dogleg point given a trust region radius \f$ \Delta \f$. The
+   * dogleg point is the intersection between the dogleg path and the trust
+   * region boundary, see doc/trustregion.pdf for more details.
+   *
+   * The update is computed using a quadratic approximation \f$ M(\delta x) \f$
+   * of an original nonlinear error function (a NonlinearFactorGraph) \f$ f(x) \f$.
+   * The quadratic approximation is represented as a GaussianBayesNet \bayesNet, which is
+   * obtained by eliminating a GaussianFactorGraph resulting from linearizing
+   * the nonlinear factor graph \f$ f(x) \f$. Thus, \f$ M(\delta x) \f$ is
+   * \f[
+   * M(\delta x) = (R \delta x - d)^T (R \delta x - d)
+   * \f]
+   * where \f$ R \f$ and \f$ d \f$ together are a Bayes' net. \f$ R \f$ is
+   * upper-triangular and \f$ d \f$ is a vector, in GTSAM represented as a
+   * GaussianBayesNet, containing GaussianConditional s.
+   *
+   * @param Delta The trust region radius
+   * @param bayesNet The Bayes' net \f$ (R,d) \f$ as described above.
+   * @return The dogleg point \f$ \delta x_d \f$
+   */
+  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+
+  /** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
+   * the function
+   * \f[
+   * M(\delta x) = (R \delta x - d)^T (R \delta x - d)
+   * \f]
+   * where \f$ R \f$ is an upper-triangular matrix and \f$ d \f$ is a vector.
+   * Together \f$ (R,d) \f$ are either a Bayes' net or a Bayes' tree.
+   *
+   * The same quadratic error function written as a Taylor expansion of the original
+   * non-linear error function is
+   * \f[
+   * M(\delta x) = f(x_0) + g(x_0) + \frac{1}{2} \delta x^T G(x_0) \delta x,
+   * \f]
+   * @tparam M The type of the Bayes' net or tree, currently
+   * either BayesNet<GaussianConditional> (or GaussianBayesNet) or BayesTree<GaussianConditional>.
+   * @param Rd The Bayes' net or tree \f$ (R,d) \f$ as described above, currently
+   * this must be of type BayesNet<GaussianConditional> (or GaussianBayesNet) or
+   * BayesTree<GaussianConditional>.
+   * @return The minimizer \f$ \delta x_u \f$ along the gradient descent direction.
+   */
+  template<class M>
+  static VectorValues ComputeSteepestDescentPoint(const M& Rd);
+
+  /** Compute the point on the line between the steepest descent point and the
+   * Newton's method point intersecting the trust region boundary.
+   * Mathematically, computes \f$ \tau \f$ such that \f$ 0<\tau<1 \f$ and
+   * \f$ \| (1-\tau)\delta x_u + \tau\delta x_n \| = \Delta \f$, where \f$ \Delta \f$
+   * is the trust region radius.
+   * @param Delta Trust region radius
+   * @param xu Steepest descent minimizer
+   * @param xn Newton's method minimizer
+   */
+  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+};
+
+
+/* ************************************************************************* */
+template<class M, class F, class VALUES>
+typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
+    double Delta, TrustRegionAdaptationMode mode, const M& Rd,
+    const F& f, const VALUES& x0, const Ordering& ordering, double f_error) {
+
+  // Compute steepest descent and Newton's method points
+  VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
+  VectorValues dx_n = optimize(Rd);
+  const GaussianFactorGraph jfg(Rd);
+  const double M_error = jfg.error(VectorValues::Zero(dx_u));
+
+  // Result to return
+  IterationResult result;
+
+  bool stay = true;
+  while(stay) {
+    // Compute dog leg point
+    result.dx_d = ComputeDoglegPoint(Delta, dx_u, dx_n);
+
+    cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
+
+    // Compute expmapped solution
+    const VALUES x_d(x0.expmap(result.dx_d, ordering));
+
+    // Compute decrease in f
+    result.f_error = f.error(x_d);
+
+    // Compute decrease in M
+    const double new_M_error = jfg.error(result.dx_d);
+
+    cout << "f error: " << f_error << " -> " << result.f_error << endl;
+    cout << "M error: " << M_error << " -> " << new_M_error << endl;
+
+    // Compute gain ratio. Here we take advantage of the invariant that the
+    // Bayes' net error at zero is equal to the nonlinear error
+    const double rho = fabs(M_error - new_M_error) < 1e-30 ?
+        0.5 :
+        (f_error - result.f_error) / (M_error - new_M_error);
+
+    cout << "rho = " << rho << endl;
+
+    if(rho >= 0.75) {
+      // M agrees very well with f, so try to increase Delta
+      const double dx_d_norm = result.dx_d.vector().norm();
+      const double newDelta = std::max(Delta, 3.0 * dx_d_norm); // Compute new Delta
+
+      if(mode == ONE_STEP_PER_ITERATION)
+        stay = false;   // If not searching, just return with the new Delta
+      else if(mode == SEARCH_EACH_ITERATION) {
+        if(newDelta == Delta)
+          stay = false; // Searching, but Newton's solution is within trust region so keep the same trust region
+        else
+          stay = true;  // Searching and increased Delta, so try again to increase Delta
+      } else {
+        assert(false); }
+
+      Delta = newDelta; // Update Delta from new Delta
+
+    } else if(0.75 > rho && rho >= 0.25) {
+      // M agrees so-so with f, keep the same Delta
+      stay = false;
+
+    } else if(0.25 > rho && rho >= 0.0) {
+      // M does not agree well with f, decrease Delta until it does
+      Delta *= 0.5;
+      if(mode == ONE_STEP_PER_ITERATION)
+        stay = false;   // If not searching, just return with the new smaller delta
+      else if(mode == SEARCH_EACH_ITERATION)
+        stay = true;
+      else {
+        assert(false); }
+    }
+
+    else {
+      // f actually increased, so keep decreasing Delta until f decreases
+      assert(0.0 > rho);
+      Delta *= 0.5;
+      if(Delta > 1e-5)
+        stay = true;
+      else
+        stay = false;
+    }
+  }
+
+  // dx_d and f_error have already been filled in during the loop
+  result.Delta = Delta;
+  return result;
+}
+
+/* ************************************************************************* */
+template<class M>
+VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) {
+
+  // Compute gradient
+  // Convert to JacobianFactor's to use existing gradient function
+  FactorGraph<JacobianFactor> jfg(Rd);
+  VectorValues grad = gradient(jfg, VectorValues::Zero(*allocateVectorValues(Rd)));
+  double gradientSqNorm = grad.dot(grad);
+
+  // Compute R * g
+  Errors Rg = jfg * grad;
+
+  // Compute minimizing step size
+  double step = -gradientSqNorm / dot(Rg, Rg);
+
+  // Compute steepest descent point
+  scal(step, grad);
+  return grad;
+}
+
+}

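DoglegOptimizerImpl::Iterate is driven from NonlinearOptimizer::iterateDogLeg() later in this commit; condensed, the call pattern looks roughly like this (solver type S plus graph, values, and ordering are assumed to come from the enclosing optimizer):

// Sketch, condensed from iterateDogLeg() below:
S solver(*graph->linearize(*values, *ordering));
DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate(
    delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *solver.eliminate(),
    *graph, *values, *ordering, graph->error(*values));
// result.dx_d is the dogleg step; apply it with expmap and inspect
// result.Delta and result.f_error for the next iteration.
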
@@ -17,14 +17,15 @@ check_PROGRAMS =
 
 # Lie Groups
 headers += LieValues.h LieValues-inl.h TupleValues.h TupleValues-inl.h
-check_PROGRAMS += tests/testLieValues
+check_PROGRAMS += tests/testLieValues tests/testKey tests/testOrdering
 
 # Nonlinear nonlinear
 headers += Key.h
 headers += NonlinearFactorGraph.h NonlinearFactorGraph-inl.h
 headers += NonlinearOptimizer-inl.h NonlinearOptimization.h NonlinearOptimization-inl.h NonlinearOptimizationParameters.h
 headers += NonlinearFactor.h
-sources += NonlinearOptimizer.cpp Ordering.cpp
+sources += NonlinearOptimizer.cpp Ordering.cpp DoglegOptimizerImpl.cpp
+headers += DoglegOptimizer.h DoglegOptimizer-inl.h
 
 # Nonlinear iSAM(2)
 headers += NonlinearISAM.h NonlinearISAM-inl.h

@@ -24,6 +24,7 @@
 #include <boost/tuple/tuple.hpp>
 #include <gtsam/base/cholesky.h>
 #include <gtsam/nonlinear/NonlinearOptimizer.h>
+#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
 
 #define INSTANTIATE_NONLINEAR_OPTIMIZER(G,C) \
 	template class NonlinearOptimizer<G,C>;

@@ -95,6 +96,7 @@ namespace gtsam {
 		if (verbosity >= Parameters::VALUES) newValues->print("newValues");
 
 		NonlinearOptimizer newOptimizer = newValues_(newValues);
+		++ newOptimizer.iterations_;
 
 		if (verbosity >= Parameters::ERROR) cout << "error: " << newOptimizer.error_ << endl;
 		return newOptimizer;

@@ -176,6 +178,7 @@ namespace gtsam {
 		// Try solving
 		try {
 			VectorValues delta = *solver->optimize();
+			if (verbosity >= Parameters::TRYLAMBDA) cout << "linear delta norm = " << delta.vector().norm() << endl;
 			if (verbosity >= Parameters::TRYDELTA) delta.print("delta");
 
 			// update values

@@ -277,6 +280,7 @@ namespace gtsam {
 			error_ = next.error_;
 			values_ = next.values_;
 			parameters_ = next.parameters_;
+			iterations_ = next.iterations_;
 
 			// check convergence
 			// TODO: move convergence checks here and incorporate in verbosity levels

@@ -292,4 +296,62 @@ namespace gtsam {
 			iterations_++;
 		}
 	}
+
+	/* ************************************************************************* */
+	template<class G, class T, class L, class S, class W>
+	NonlinearOptimizer<G, T, L, S, W> NonlinearOptimizer<G, T, L, S, W>::iterateDogLeg() {
+
+		cout << "values: " << values_.get() << endl;
+		S solver(*graph_->linearize(*values_, *ordering_));
+		DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate(
+				parameters_->lambda_, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *solver.eliminate(),
+				*graph_, *values_, *ordering_, error_);
+		shared_values newValues(new T(values_->expmap(result.dx_d, *ordering_)));
+		cout << "newValues: " << newValues.get() << endl;
+		return newValuesErrorLambda_(newValues, result.f_error, result.Delta);
+	}
+
+	/* ************************************************************************* */
+	template<class G, class T, class L, class S, class W>
+	NonlinearOptimizer<G, T, L, S, W> NonlinearOptimizer<G, T, L, S, W>::dogLeg() {
+		static W writer(error_);
+
+		// check if we're already close enough
+		if (error_ < parameters_->sumError_) {
+			if ( parameters_->verbosity_ >= Parameters::ERROR )
+				cout << "Exiting, as sumError = " << error_ << " < " << parameters_->sumError_ << endl;
+			return *this;
+		}
+
+		// for the case that maxIterations_ = 0
+		iterations_ = 1;
+		if (iterations_ >= parameters_->maxIterations_)
+			return *this;
+
+		// Iterative loop that runs Dog Leg
+		while (true) {
+			double previous_error = error_;
+			// do one iteration of Dog Leg
+			NonlinearOptimizer next = iterateDogLeg();
+			writer.write(next.error_);
+			error_ = next.error_;
+			values_ = next.values_;
+			parameters_ = next.parameters_;
+
+			// check convergence
+			// TODO: move convergence checks here and incorporate in verbosity levels
+			// TODO: build into iterations somehow as an instance variable
+			bool converged = gtsam::check_convergence(*parameters_, previous_error, error_);
+
+			if(iterations_ >= parameters_->maxIterations_ || converged == true) {
+				if (parameters_->verbosity_ >= Parameters::VALUES) values_->print("final values");
+				if (parameters_->verbosity_ >= Parameters::ERROR) cout << "final error: " << error_ << endl;
+				if (parameters_->verbosity_ >= Parameters::LAMBDA) cout << "final Delta (called lambda) = " << lambda() << endl;
+				return *this;
+			}
+			iterations_++;
+		}
+	}
 }

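For reference, the trust-region adaptation that this loop drives one step at a time is the gain ratio computed in DoglegOptimizerImpl::Iterate above,

\f[ \rho = \frac{f(x_0) - f(x_0 \oplus \delta x_d)}{M(0) - M(\delta x_d)}, \f]

the ratio of actual nonlinear error decrease to the decrease predicted by the quadratic model: Delta is grown to three times the step length (if larger) when \f$ \rho \ge 0.75 \f$, kept when \f$ 0.25 \le \rho < 0.75 \f$, and halved otherwise.
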
@@ -170,7 +170,7 @@ public:
 	NonlinearOptimizer(const NonlinearOptimizer<G, T, L, GS> &optimizer) :
 		graph_(optimizer.graph_), values_(optimizer.values_), error_(optimizer.error_),
 		ordering_(optimizer.ordering_), parameters_(optimizer.parameters_),
-		iterations_(0), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {}
+		iterations_(optimizer.iterations_), dimensions_(optimizer.dimensions_), structure_(optimizer.structure_) {}
 
 	// access to member variables
 
@@ -268,15 +268,23 @@ public:
 	///
 	NonlinearOptimizer levenbergMarquardt();
 
-	// static interfaces to LM and GN optimization techniques
+	/**
+	 * One iteration of the dog leg algorithm
+	 */
+	NonlinearOptimizer iterateDogLeg();
 
-	///
-	///Static interface to LM optimization using default ordering and thresholds
-	///@param graph Nonlinear factor graph to optimize
-	///@param values Initial values
-	///@param verbosity Integer specifying how much output to provide
-	///@return an optimized values structure
-	///
+	/**
+	 * Optimize using the Dog Leg algorithm
+	 */
+	NonlinearOptimizer dogLeg();
+
+	// static interfaces to LM, Dog leg, and GN optimization techniques
+
+	///Static interface to LM optimization using default ordering
+	///@param graph Nonlinear factor graph to optimize
+	///@param values Initial values
+	///@param parameters Optimization parameters
+	///@return an optimized values structure
 	static shared_values optimizeLM(shared_graph graph,
 			shared_values values,
 			shared_parameters parameters = boost::make_shared<Parameters>()) {

@@ -293,8 +301,7 @@ public:
 
 	static shared_values optimizeLM(shared_graph graph,
 			shared_values values,
-			Parameters::verbosityLevel verbosity)
-	{
+			Parameters::verbosityLevel verbosity) {
 		return optimizeLM(graph, values, Parameters::newVerbosity(verbosity));
 	}
 
@@ -317,6 +324,57 @@ public:
 				verbosity);
 	}
 
+	///Static interface to Dog leg optimization using default ordering
+	///@param graph Nonlinear factor graph to optimize
+	///@param values Initial values
+	///@param parameters Optimization parameters
+	///@return an optimized values structure
+	static shared_values optimizeDogLeg(shared_graph graph,
+			shared_values values,
+			shared_parameters parameters = boost::make_shared<Parameters>()) {
+
+		// Use a variable ordering from COLAMD
+		Ordering::shared_ptr ordering = graph->orderingCOLAMD(*values);
+		// initial optimization state is the same in both cases tested
+		//GS solver(*graph->linearize(*values, *ordering));
+
+		NonlinearOptimizer optimizer(graph, values, ordering, parameters);
+		NonlinearOptimizer result = optimizer.dogLeg();
+		return result.values();
+	}
+
+	///
+	///Static interface to Dog leg optimization using default ordering and thresholds
+	///@param graph Nonlinear factor graph to optimize
+	///@param values Initial values
+	///@param verbosity Integer specifying how much output to provide
+	///@return an optimized values structure
+	///
+	static shared_values optimizeDogLeg(shared_graph graph,
+			shared_values values,
+			Parameters::verbosityLevel verbosity) {
+		return optimizeDogLeg(graph, values, Parameters::newVerbosity(verbosity)->newLambda_(1.0));
+	}
+
+	/**
+	 * Static interface to Dogleg optimization (no shared_ptr arguments) - see above
+	 */
+	static shared_values optimizeDogLeg(const G& graph,
+			const T& values,
+			const Parameters parameters = Parameters()) {
+		return optimizeDogLeg(boost::make_shared<const G>(graph),
+				boost::make_shared<const T>(values),
+				boost::make_shared<Parameters>(parameters));
+	}
+
+	static shared_values optimizeDogLeg(const G& graph,
+			const T& values,
+			Parameters::verbosityLevel verbosity) {
+		return optimizeDogLeg(boost::make_shared<const G>(graph),
+				boost::make_shared<const T>(values),
+				verbosity);
+	}
+
 	///
 	///Static interface to GN optimization using default ordering and thresholds
 	///@param graph Nonlinear factor graph to optimize

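A hedged sketch of the new static entry point (Graph and Values are hypothetical stand-ins for the G and T template arguments, assuming default solver template arguments):

// Sketch: one-call dogleg optimization with a COLAMD ordering.
typedef NonlinearOptimizer<Graph, Values> Optimizer;
Optimizer::shared_values solution = Optimizer::optimizeDogLeg(
    boost::make_shared<const Graph>(graph),
    boost::make_shared<const Values>(initial));
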
@@ -18,6 +18,7 @@
 using namespace boost::assign;
 
 #include <CppUnitLite/TestHarness.h>
+#include <gtsam/base/Testable.h>
 #include <gtsam/nonlinear/Key.h>
 
 using namespace std;

@@ -109,27 +109,27 @@ namespace gtsam {
     /// Creates a NonlinearFactorGraph based on another NonlinearFactorGraph
     Graph(const NonlinearFactorGraph<Values>& graph);
 
-    /// Biases the value of PoseKey i with Pose2 p given a noise model
-    void addPrior(const PoseKey& i, const Pose2& p, const SharedNoiseModel& model);
+    /// Biases the value of PoseKey key with Pose2 p given a noise model
+    void addPrior(const PoseKey& key, const Pose2& pose, const SharedNoiseModel& noiseModel);
 
-    /// Creates a hard constraint to enforce Pose2 p for PoseKey i's value
-    void addPoseConstraint(const PoseKey& i, const Pose2& p);
+    /// Creates a hard constraint to enforce Pose2 p for PoseKey poseKey's value
+    void addPoseConstraint(const PoseKey& poseKey, const Pose2& pose);
 
-    /// Creates a factor with a Pose2 between PoseKeys i and j (i.e. an odometry measurement)
-    void addOdometry(const PoseKey& i, const PoseKey& j, const Pose2& z,
+    /// Creates a factor with a Pose2 between PoseKeys poseKey and pointKey (i.e. an odometry measurement)
+    void addOdometry(const PoseKey& poseKey, const PoseKey& pointKey, const Pose2& odometry,
         const SharedNoiseModel& model);
 
-    /// Creates a factor with a Rot2 between a PoseKey i and PointKey j for difference in rotation
-    void addBearing(const PoseKey& i, const PointKey& j, const Rot2& z,
+    /// Creates a factor with a Rot2 between a PoseKey poseKey and PointKey pointKey for difference in rotation
+    void addBearing(const PoseKey& poseKey, const PointKey& pointKey, const Rot2& bearing,
         const SharedNoiseModel& model);
 
-    /// Creates a factor with a Rot2 between a PoseKey i and PointKey j for difference in location
-    void addRange(const PoseKey& i, const PointKey& j, double z,
+    /// Creates a factor with a range measurement between a PoseKey poseKey and PointKey pointKey
+    void addRange(const PoseKey& poseKey, const PointKey& pointKey, double range,
         const SharedNoiseModel& model);
 
-    /// Creates a factor with a Rot2 between a PoseKey i and PointKey j for difference in rotation and location
-    void addBearingRange(const PoseKey& i, const PointKey& j,
-        const Rot2& z1, double z2, const SharedNoiseModel& model);
+    /// Creates a factor with a Rot2 between a PoseKey poseKey and PointKey pointKey for difference in rotation and location
+    void addBearingRange(const PoseKey& poseKey, const PointKey& pointKey,
+        const Rot2& bearing, double range, const SharedNoiseModel& model);
 
     /// Optimize_ for MATLAB
     boost::shared_ptr<Values> optimize_(const Values& initialEstimate) {

@@ -28,6 +28,7 @@
 #include <gtsam/slam/PriorFactor.h>
 #include <gtsam/slam/ProjectionFactor.h>
 #include <gtsam/slam/StereoFactor.h>
+#include <gtsam/slam/RangeFactor.h>
 
 namespace gtsam {
 
@@ -47,6 +48,7 @@ namespace gtsam {
     typedef NonlinearEquality<Values, PointKey> PointConstraint;  ///< put a hard constraint on a point
     typedef PriorFactor<Values, PoseKey> PosePrior;               ///< put a soft prior on a Pose
     typedef PriorFactor<Values, PointKey> PointPrior;             ///< put a soft prior on a point
+    typedef RangeFactor<Values, PoseKey, PointKey> RangeFactor;   ///< put a factor on the range from a pose to a point
 
     /// monocular and stereo camera typedefs for general use
     typedef GenericProjectionFactor<Values, PointKey, PoseKey> ProjectionFactor;

@@ -115,7 +117,7 @@ namespace gtsam {
      * @param p to which pose to constrain it to
      * @param model uncertainty model of this prior
      */
-    void addPosePrior(int j, const Pose3& p = Pose3(), const SharedNoiseModel& model = noiseModel::Unit::Create(1)) {
+    void addPosePrior(int j, const Pose3& p = Pose3(), const SharedNoiseModel& model = noiseModel::Unit::Create(6)) {
       boost::shared_ptr<PosePrior> factor(new PosePrior(j, p, model));
       push_back(factor);
     }

@ -126,11 +128,15 @@ namespace gtsam {
 * @param p index of point
 * @param p index of point
 * @param model uncertainty model of this prior
 * @param model uncertainty model of this prior
 */
 */
void addPointPrior(int j, const Point3& p = Point3(), const SharedNoiseModel& model = noiseModel::Unit::Create(1)) {
void addPointPrior(int j, const Point3& p = Point3(), const SharedNoiseModel& model = noiseModel::Unit::Create(3)) {
  boost::shared_ptr<PointPrior> factor(new PointPrior(j, p, model));
  boost::shared_ptr<PointPrior> factor(new PointPrior(j, p, model));
  push_back(factor);
  push_back(factor);
}
}
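The two default-argument fixes above are dimensional corrections: a noise model must match the dimension of the factor's error vector, which is the tangent-space dimension of the variable being constrained. A minimal sketch of the corrected defaults, assuming only the noiseModel::Unit API already used above:

// Unit-covariance models sized to each type's tangent space:
SharedNoiseModel poseModel  = noiseModel::Unit::Create(6);  // Pose3: 3 rotation + 3 translation
SharedNoiseModel pointModel = noiseModel::Unit::Create(3);  // Point3: x, y, z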
void addRangeFactor(PoseKey i, PointKey j, double range, const SharedNoiseModel& model = noiseModel::Unit::Create(1)) {
  push_back(boost::shared_ptr<RangeFactor>(new RangeFactor(i, j, range, model)));
}

}; // Graph
}; // Graph

/// typedef for Optimizer. The current default will use the multi-frontal solver
/// typedef for Optimizer. The current default will use the multi-frontal solver
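For the new addRangeFactor, the Create(1) default is correct as-is, since a range measurement is one-dimensional. A hedged usage sketch (key types as declared above; indices and the Isotropic call are illustrative assumptions):

// Constrain the distance between pose 1 and point 1 to 4.2 map units:
graph.addRangeFactor(PoseKey(1), PointKey(1), 4.2,
    noiseModel::Isotropic::Sigma(1, 0.25));  // assumed Isotropic API, 1-D noise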
@ -15,6 +15,7 @@ check_PROGRAMS += testInference
check_PROGRAMS += testGaussianJunctionTree
check_PROGRAMS += testGaussianJunctionTree
check_PROGRAMS += testNonlinearEquality testNonlinearFactor testNonlinearFactorGraph
check_PROGRAMS += testNonlinearEquality testNonlinearFactor testNonlinearFactorGraph
check_PROGRAMS += testNonlinearOptimizer
check_PROGRAMS += testNonlinearOptimizer
# check_PROGRAMS += testDoglegOptimizer # Uses debugging prints so commented out in SVN
check_PROGRAMS += testSymbolicBayesNet testSymbolicFactorGraph
check_PROGRAMS += testSymbolicBayesNet testSymbolicFactorGraph
check_PROGRAMS += testTupleValues
check_PROGRAMS += testTupleValues
check_PROGRAMS += testNonlinearISAM
check_PROGRAMS += testNonlinearISAM
@ -0,0 +1,416 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file    testDoglegOptimizer.cpp
 * @brief   Unit tests for DoglegOptimizer
 * @author  Richard Roberts
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/inference/FactorGraph-inl.h>
#include <gtsam/inference/BayesTree-inl.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
#include <gtsam/slam/pose2SLAM.h>
#include <gtsam/slam/smallExample.h>

#include <boost/bind.hpp>
#include <boost/assign/list_of.hpp> // for 'list_of()'
#include <functional>

using namespace std;
using namespace gtsam;
using boost::shared_ptr;

/* ************************************************************************* */
double computeError(const GaussianBayesNet& gbn, const LieVector& values) {

  // Convert Vector to VectorValues
  VectorValues vv = *allocateVectorValues(gbn);
  vv.vector() = values;

  // Convert to factor graph
  GaussianFactorGraph gfg(gbn);
  return gfg.error(vv);
}

/* ************************************************************************* */
double computeErrorBt(const BayesTree<GaussianConditional>& gbt, const LieVector& values) {

  // Convert Vector to VectorValues
  VectorValues vv = *allocateVectorValues(gbt);
  vv.vector() = values;

  // Convert to factor graph
  GaussianFactorGraph gfg(gbt);
  return gfg.error(vv);
}

/* ************************************************************************* */
TEST(DoglegOptimizer, ComputeSteepestDescentPoint) {

  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector_(2, 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
      3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
      4, Matrix_(2,2, 11.0,12.0,13.0,14.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector_(2, 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
      2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
      4, Matrix_(2,2, 25.0,26.0,27.0,28.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector_(2, 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
      3, Matrix_(2,2, 35.0,36.0,37.0,38.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector_(2, 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
      4, Matrix_(2,2, 45.0,46.0,47.0,48.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2)));

  // Compute the Hessian numerically
  Matrix hessian = numericalHessian(
      boost::function<double(const LieVector&)>(boost::bind(&computeError, gbn, _1)),
      LieVector(VectorValues::Zero(*allocateVectorValues(gbn)).vector()));

  // Compute the gradient numerically
  VectorValues gradientValues = *allocateVectorValues(gbn);
  Vector gradient = numericalGradient(
      boost::function<double(const LieVector&)>(boost::bind(&computeError, gbn, _1)),
      LieVector(VectorValues::Zero(gradientValues).vector()));
  gradientValues.vector() = gradient;

  // Compute the gradient using dense matrices
  Matrix augmentedHessian = GaussianFactorGraph(gbn).denseHessian();
  LONGS_EQUAL(11, augmentedHessian.cols());
  VectorValues denseMatrixGradient = *allocateVectorValues(gbn);
  denseMatrixGradient.vector() = -augmentedHessian.col(10).segment(0,10);
  EXPECT(assert_equal(gradientValues, denseMatrixGradient, 1e-5));

  // Compute the steepest descent point
  double step = -gradient.squaredNorm() / (gradient.transpose() * hessian * gradient)(0);
  VectorValues expected = gradientValues; scal(step, expected);

  // Compute the steepest descent point with the dogleg function
  VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn);

  // Check that points agree
  EXPECT(assert_equal(expected, actual, 1e-5));

  // Check that point causes a decrease in error
  double origError = GaussianFactorGraph(gbn).error(VectorValues::Zero(*allocateVectorValues(gbn)));
  double newError = GaussianFactorGraph(gbn).error(actual);
  EXPECT(newError < origError);
}
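The `step` computed in this test is the closed-form line minimizer of the quadratic model along the gradient, i.e. the Cauchy point used by dogleg. With gradient g and Hessian H as in the test, minimizing along the ray \alpha g:

    \phi(\alpha) = f(0) + \alpha\, g^\top g + \tfrac{1}{2}\,\alpha^2\, g^\top H g,
    \qquad
    \phi'(\alpha^\ast) = 0 \;\Rightarrow\; \alpha^\ast = -\frac{g^\top g}{g^\top H g},
    \qquad
    x_u = \alpha^\ast\, g,

which is exactly `-gradient.squaredNorm() / (gradient.transpose() * hessian * gradient)(0)` times the gradient.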
/* ************************************************************************* */
TEST(DoglegOptimizer, BT_BN_equivalency) {

  // Create an arbitrary Bayes Tree
  BayesTree<GaussianConditional> bt;
  bt.insert(BayesTree<GaussianConditional>::sharedClique(new BayesTree<GaussianConditional>::Clique(
      GaussianConditional::shared_ptr(new GaussianConditional(
          boost::assign::pair_list_of
              (2, Matrix_(6,2,
                  31.0,32.0,
                  0.0,34.0,
                  0.0,0.0,
                  0.0,0.0,
                  0.0,0.0,
                  0.0,0.0))
              (3, Matrix_(6,2,
                  35.0,36.0,
                  37.0,38.0,
                  41.0,42.0,
                  0.0,44.0,
                  0.0,0.0,
                  0.0,0.0))
              (4, Matrix_(6,2,
                  0.0,0.0,
                  0.0,0.0,
                  45.0,46.0,
                  47.0,48.0,
                  51.0,52.0,
                  0.0,54.0)),
          3, Vector_(6, 29.0,30.0,39.0,40.0,49.0,50.0), ones(6))))));
  bt.insert(BayesTree<GaussianConditional>::sharedClique(new BayesTree<GaussianConditional>::Clique(
      GaussianConditional::shared_ptr(new GaussianConditional(
          boost::assign::pair_list_of
              (0, Matrix_(4,2,
                  3.0,4.0,
                  0.0,6.0,
                  0.0,0.0,
                  0.0,0.0))
              (1, Matrix_(4,2,
                  0.0,0.0,
                  0.0,0.0,
                  17.0,18.0,
                  0.0,20.0))
              (2, Matrix_(4,2,
                  0.0,0.0,
                  0.0,0.0,
                  21.0,22.0,
                  23.0,24.0))
              (3, Matrix_(4,2,
                  7.0,8.0,
                  9.0,10.0,
                  0.0,0.0,
                  0.0,0.0))
              (4, Matrix_(4,2,
                  11.0,12.0,
                  13.0,14.0,
                  25.0,26.0,
                  27.0,28.0)),
          2, Vector_(4, 1.0,2.0,15.0,16.0), ones(4))))));

  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector_(2, 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
      3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
      4, Matrix_(2,2, 11.0,12.0,13.0,14.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector_(2, 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
      2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
      4, Matrix_(2,2, 25.0,26.0,27.0,28.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector_(2, 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
      3, Matrix_(2,2, 35.0,36.0,37.0,38.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector_(2, 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
      4, Matrix_(2,2, 45.0,46.0,47.0,48.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2)));

  GaussianFactorGraph expected(gbn);
  GaussianFactorGraph actual(bt);

  EXPECT(assert_equal(expected.denseJacobian(), actual.denseJacobian()));
}
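Why this equality should hold: the Bayes tree above groups into two cliques the same conditionals the Bayes net stores singly, and both represent the identical factored Gaussian

    p(x) \;=\; \prod_i p(x_i \mid S_i)
    \;\propto\;
    \exp\Big(-\tfrac{1}{2}\sum_i \big\|R_i\, x_i + \textstyle\sum_{s \in S_i} A_{is}\, x_s - d_i\big\|^2\Big),

so converting either container to a GaussianFactorGraph stacks the same block rows and the dense Jacobians match.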
/* ************************************************************************* */
TEST(DoglegOptimizer, ComputeSteepestDescentPointBT) {

  // Create an arbitrary Bayes Tree
  BayesTree<GaussianConditional> bt;
  bt.insert(BayesTree<GaussianConditional>::sharedClique(new BayesTree<GaussianConditional>::Clique(
      GaussianConditional::shared_ptr(new GaussianConditional(
          boost::assign::pair_list_of
              (2, Matrix_(6,2,
                  31.0,32.0,
                  0.0,34.0,
                  0.0,0.0,
                  0.0,0.0,
                  0.0,0.0,
                  0.0,0.0))
              (3, Matrix_(6,2,
                  35.0,36.0,
                  37.0,38.0,
                  41.0,42.0,
                  0.0,44.0,
                  0.0,0.0,
                  0.0,0.0))
              (4, Matrix_(6,2,
                  0.0,0.0,
                  0.0,0.0,
                  45.0,46.0,
                  47.0,48.0,
                  51.0,52.0,
                  0.0,54.0)),
          3, Vector_(6, 29.0,30.0,39.0,40.0,49.0,50.0), ones(6))))));
  bt.insert(BayesTree<GaussianConditional>::sharedClique(new BayesTree<GaussianConditional>::Clique(
      GaussianConditional::shared_ptr(new GaussianConditional(
          boost::assign::pair_list_of
              (0, Matrix_(4,2,
                  3.0,4.0,
                  0.0,6.0,
                  0.0,0.0,
                  0.0,0.0))
              (1, Matrix_(4,2,
                  0.0,0.0,
                  0.0,0.0,
                  17.0,18.0,
                  0.0,20.0))
              (2, Matrix_(4,2,
                  0.0,0.0,
                  0.0,0.0,
                  21.0,22.0,
                  23.0,24.0))
              (3, Matrix_(4,2,
                  7.0,8.0,
                  9.0,10.0,
                  0.0,0.0,
                  0.0,0.0))
              (4, Matrix_(4,2,
                  11.0,12.0,
                  13.0,14.0,
                  25.0,26.0,
                  27.0,28.0)),
          2, Vector_(4, 1.0,2.0,15.0,16.0), ones(4))))));

  // Compute the Hessian numerically
  Matrix hessian = numericalHessian(
      boost::function<double(const LieVector&)>(boost::bind(&computeErrorBt, bt, _1)),
      LieVector(VectorValues::Zero(*allocateVectorValues(bt)).vector()));

  // Compute the gradient numerically
  VectorValues gradientValues = *allocateVectorValues(bt);
  Vector gradient = numericalGradient(
      boost::function<double(const LieVector&)>(boost::bind(&computeErrorBt, bt, _1)),
      LieVector(VectorValues::Zero(gradientValues).vector()));
  gradientValues.vector() = gradient;

  // Compute the gradient using dense matrices
  Matrix augmentedHessian = GaussianFactorGraph(bt).denseHessian();
  LONGS_EQUAL(11, augmentedHessian.cols());
  VectorValues denseMatrixGradient = *allocateVectorValues(bt);
  denseMatrixGradient.vector() = -augmentedHessian.col(10).segment(0,10);
  EXPECT(assert_equal(gradientValues, denseMatrixGradient, 1e-5));

  // Compute the steepest descent point
  double step = -gradient.squaredNorm() / (gradient.transpose() * hessian * gradient)(0);
  VectorValues expected = gradientValues; scal(step, expected);

  // Known steepest descent point from Bayes' net version
  VectorValues expectedFromBN(5,2);
  expectedFromBN[0] = Vector_(2, 0.000129034, 0.000688183);
  expectedFromBN[1] = Vector_(2, 0.0109679, 0.0253767);
  expectedFromBN[2] = Vector_(2, 0.0680441, 0.114496);
  expectedFromBN[3] = Vector_(2, 0.16125, 0.241294);
  expectedFromBN[4] = Vector_(2, 0.300134, 0.423233);

  // Compute the steepest descent point with the dogleg function
  VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(bt);

  // Check that points agree
  EXPECT(assert_equal(expected, actual, 1e-5));
  EXPECT(assert_equal(expectedFromBN, actual, 1e-5));

  // Check that point causes a decrease in error
  double origError = GaussianFactorGraph(bt).error(VectorValues::Zero(*allocateVectorValues(bt)));
  double newError = GaussianFactorGraph(bt).error(actual);
  EXPECT(newError < origError);
}

/* ************************************************************************* */
TEST(DoglegOptimizer, ComputeBlend) {
  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector_(2, 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
      3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
      4, Matrix_(2,2, 11.0,12.0,13.0,14.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector_(2, 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
      2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
      4, Matrix_(2,2, 25.0,26.0,27.0,28.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector_(2, 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
      3, Matrix_(2,2, 35.0,36.0,37.0,38.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector_(2, 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
      4, Matrix_(2,2, 45.0,46.0,47.0,48.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2)));

  // Compute steepest descent point
  VectorValues xu = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn);

  // Compute Newton's method point
  VectorValues xn = optimize(gbn);

  // The Newton's method point should be more "adventurous", i.e. larger, than the steepest descent point
  EXPECT(xu.vector().norm() < xn.vector().norm());

  // Compute blend
  double Delta = 1.5;
  VectorValues xb = DoglegOptimizerImpl::ComputeBlend(Delta, xu, xn);
  DOUBLES_EQUAL(Delta, xb.vector().norm(), 1e-10);
}
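ComputeBlend returns the point on the segment from the steepest-descent point x_u to the Newton point x_n whose norm is exactly Delta, which is why the test can assert the norm to 1e-10. Solving for the interpolation parameter is a scalar quadratic:

    \|x_u + \tau\,(x_n - x_u)\|^2 = \Delta^2
    \;\Longleftrightarrow\;
    a\,\tau^2 + b\,\tau + c = 0,
    \qquad
    a = \|x_n - x_u\|^2,\;
    b = 2\,x_u^\top (x_n - x_u),\;
    c = \|x_u\|^2 - \Delta^2,

with the root \tau = \big(-b + \sqrt{b^2 - 4ac}\,\big)/(2a) \in [0,1] selected so the blend moves from x_u toward x_n.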
/* ************************************************************************* */
TEST(DoglegOptimizer, ComputeDoglegPoint) {
  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector_(2, 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
      3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
      4, Matrix_(2,2, 11.0,12.0,13.0,14.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector_(2, 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
      2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
      4, Matrix_(2,2, 25.0,26.0,27.0,28.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector_(2, 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
      3, Matrix_(2,2, 35.0,36.0,37.0,38.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector_(2, 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
      4, Matrix_(2,2, 45.0,46.0,47.0,48.0), ones(2)));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2)));

  // Compute dogleg point for different deltas

  double Delta1 = 0.5;  // Less than steepest descent
  VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
  DOUBLES_EQUAL(Delta1, actual1.vector().norm(), 1e-5);

  double Delta2 = 1.5;  // Between steepest descent and Newton's method
  VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
  VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
  DOUBLES_EQUAL(Delta2, actual2.vector().norm(), 1e-5);
  EXPECT(assert_equal(expected2, actual2));

  double Delta3 = 5.0;  // Larger than Newton's method point
  VectorValues expected3 = optimize(gbn);
  VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
  EXPECT(assert_equal(expected3, actual3));
}
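The three Delta regimes exercised above are the standard dogleg case split. A minimal free-standing sketch of that selection logic follows (illustrative Eigen code with a hypothetical helper name, not GTSAM's actual ComputeDoglegPoint implementation):

// Sketch of dogleg point selection; doglegPoint is a hypothetical helper.
#include <cmath>
#include <Eigen/Dense>

Eigen::VectorXd doglegPoint(double Delta,
                            const Eigen::VectorXd& xu,   // steepest descent (Cauchy) point
                            const Eigen::VectorXd& xn) { // Newton's method point
  if (Delta <= xu.norm())
    return (Delta / xu.norm()) * xu;  // truncate the gradient step to the trust region
  if (Delta >= xn.norm())
    return xn;                        // the full Newton step fits inside the trust region
  // Otherwise blend: the point on the segment xu -> xn with norm exactly Delta
  const Eigen::VectorXd d = xn - xu;
  const double a = d.squaredNorm();
  const double b = 2.0 * xu.dot(d);
  const double c = xu.squaredNorm() - Delta * Delta;
  const double tau = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
  return xu + tau * d;
}

Delta1 = 0.5 hits the first branch (hence the exact-norm check), Delta3 = 5.0 the second, and Delta2 = 1.5 the blend.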
/* ************************************************************************* */
TEST(DoglegOptimizer, Iterate) {
  // really non-linear factor graph
  shared_ptr<example::Graph> fg(new example::Graph(
      example::createReallyNonlinearFactorGraph()));

  // config far from minimum
  Point2 x0(3,0);
  boost::shared_ptr<simulated2D::Values> config(new example::Values);
  config->insert(simulated2D::PoseKey(1), x0);

  // ordering
  shared_ptr<Ordering> ord(new Ordering());
  ord->push_back(simulated2D::PoseKey(1));

  double Delta = 1.0;
  for(size_t it=0; it<10; ++it) {
    GaussianSequentialSolver solver(*fg->linearize(*config, *ord));
    GaussianBayesNet gbn = *solver.eliminate();
    // Iterate assumes that linear error = nonlinear error at the linearization point, and this should be true
    double nonlinearError = fg->error(*config);
    double linearError = GaussianFactorGraph(gbn).error(VectorValues::Zero(*allocateVectorValues(gbn)));
    DOUBLES_EQUAL(nonlinearError, linearError, 1e-5);
    // cout << "it " << it << ", Delta = " << Delta << ", error = " << fg->error(*config) << endl;
    DoglegOptimizerImpl::IterationResult result = DoglegOptimizerImpl::Iterate(Delta, DoglegOptimizerImpl::SEARCH_EACH_ITERATION, gbn, *fg, *config, *ord, fg->error(*config));
    Delta = result.Delta;
    EXPECT(result.f_error < fg->error(*config)); // Check that error decreases
    simulated2D::Values newConfig(config->expmap(result.dx_d, *ord));
    (*config) = newConfig;
    DOUBLES_EQUAL(fg->error(*config), result.f_error, 1e-5); // Check that error is correctly filled in
  }
}
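Iterate adapts the trust-region radius from the gain ratio between the actual nonlinear error reduction and the reduction predicted by the linear model, which is why the loop feeds result.Delta back in each iteration. Schematically, with textbook thresholds shown purely for illustration (GTSAM's exact constants are not stated here):

    \rho = \frac{f(x) - f(x \oplus \delta_d)}{M(0) - M(\delta_d)},
    \qquad
    \Delta \leftarrow
    \begin{cases}
    2\,\Delta & \rho > 0.75,\\
    \Delta & 0.25 \le \rho \le 0.75,\\
    \Delta / 2 & \rho < 0.25.
    \end{cases}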
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
/* ************************************************************************* */