Merged in fix/SubgraphPreconditioner (pull request #407)

Fix/SubgraphPreconditioner
release/4.3a0
Frank Dellaert 2019-04-08 21:21:36 +00:00
commit 60b2c3b157
9 changed files with 3995 additions and 280 deletions

File diff suppressed because it is too large Load Diff

169
examples/Data/toy3D.xml Normal file
View File

@ -0,0 +1,169 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE boost_serialization>
<boost_serialization signature="serialization::archive" version="17">
<graph class_id="0" tracking_level="0" version="0">
<Base class_id="1" tracking_level="0" version="0">
<factors_ class_id="2" tracking_level="0" version="0">
<count>2</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>
<count>2</count>
<item_version>0</item_version>
<item>0</item>
<item>1</item>
</keys_>
</Base>
</Base>
<Ab_ class_id="8" tracking_level="0" version="0">
<matrix_ class_id="9" tracking_level="0" version="0">
<rows>9</rows>
<cols>7</cols>
<data>
<item>1.22427709071730959e+01</item>
<item>1.51514613104920048e+01</item>
<item>3.60934366857813060e+00</item>
<item>-1.18407259026383116e+01</item>
<item>7.84826117220921216e+00</item>
<item>1.23509165494819051e+01</item>
<item>-6.09875015991639735e+00</item>
<item>6.16547190708139126e-01</item>
<item>3.94972084922329048e+00</item>
<item>-4.89208482920378174e+00</item>
<item>3.02091647632478866e+00</item>
<item>-8.95328692238917512e+00</item>
<item>7.89831607220345955e+00</item>
<item>-2.36793602009719084e+00</item>
<item>1.48517612051941725e+01</item>
<item>-3.97284286249233731e-01</item>
<item>-1.95744531643153863e+01</item>
<item>-3.85954855417462017e+00</item>
<item>4.79268277145419042e+00</item>
<item>-9.01707953629520453e+00</item>
<item>1.37848069005841385e+01</item>
<item>1.04829326688375950e+01</item>
<item>-5.00630568442241675e+00</item>
<item>4.70463561852773182e+00</item>
<item>-1.59179134598689274e+01</item>
<item>-2.04767784956723942e+00</item>
<item>9.54135497908261954e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>-0.00000000000000000e+00</item>
<item>-1.76201757312015372e+00</item>
<item>1.98634190821282672e+01</item>
<item>1.52966546661624236e+00</item>
<item>1.94817649567575373e+01</item>
<item>1.39684693294110307e+00</item>
<item>4.30228460420588288e+00</item>
<item>1.76201757312015372e+00</item>
<item>-1.98634190821282672e+01</item>
<item>-1.52966546661624236e+00</item>
<item>-0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>4.16606867942304948e+00</item>
<item>1.86906420801308037e+00</item>
<item>-1.94717865319198360e+01</item>
<item>-1.94817649567575373e+01</item>
<item>-1.39684693294110307e+00</item>
<item>-4.30228460420588288e+00</item>
<item>-4.16606867942304948e+00</item>
<item>-1.86906420801308037e+00</item>
<item>1.94717865319198360e+01</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00891462904330531e+01</item>
<item>-1.08132497987816869e+01</item>
<item>8.66487736568128497e+00</item>
<item>2.88370015604634311e+01</item>
<item>1.89391698948574643e+01</item>
<item>2.12398960190661290e+00</item>
<item>1.22150946112124039e+01</item>
<item>-2.33658532501596561e+01</item>
<item>1.51576204760307363e+01</item>
</data>
</matrix_>
<variableColOffsets_>
<count>4</count>
<item_version>0</item_version>
<item>0</item>
<item>3</item>
<item>6</item>
<item>7</item>
</variableColOffsets_>
<rowStart_>0</rowStart_>
<rowEnd_>9</rowEnd_>
<blockStart_>0</blockStart_>
</Ab_>
<model_ class_id="11" tracking_level="0" version="1">
<px class_id="-1"></px>
</model_>
</px>
</item>
<item>
<px class_id_reference="4" object_id="_1">
<Base>
<Base>
<keys_>
<count>2</count>
<item_version>0</item_version>
<item>0</item>
<item>1</item>
</keys_>
</Base>
</Base>
<Ab_>
<matrix_>
<rows>3</rows>
<cols>7</cols>
<data>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
</data>
</matrix_>
<variableColOffsets_>
<count>4</count>
<item_version>0</item_version>
<item>0</item>
<item>3</item>
<item>6</item>
<item>7</item>
</variableColOffsets_>
<rowStart_>0</rowStart_>
<rowEnd_>3</rowEnd_>
<blockStart_>0</blockStart_>
</Ab_>
<model_>
<px class_id="-1"></px>
</model_>
</px>
</item>
</factors_>
</Base>
</graph>
</boost_serialization>

View File

@ -67,28 +67,29 @@ struct FixedSizeMatrix {
} }
/** /**
* Numerically compute gradient of scalar function * @brief Numerically compute gradient of scalar function
* @return n-dimensional gradient computed via central differencing
* Class X is the input argument * Class X is the input argument
* The class X needs to have dim, expmap, logmap * The class X needs to have dim, expmap, logmap
* int N is the dimension of the X input value if variable dimension type but known at test time
*/ */
template<class X>
typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<double(const X&)> h, const X& x, template <class X, int N = traits<X>::dimension>
double delta = 1e-5) { typename Eigen::Matrix<double, N, 1> numericalGradient(
boost::function<double(const X&)> h, const X& x, double delta = 1e-5) {
double factor = 1.0 / (2.0 * delta); double factor = 1.0 / (2.0 * delta);
BOOST_STATIC_ASSERT_MSG( BOOST_STATIC_ASSERT_MSG(
(boost::is_base_of<manifold_tag, typename traits<X>::structure_category>::value), (boost::is_base_of<manifold_tag, typename traits<X>::structure_category>::value),
"Template argument X must be a manifold type."); "Template argument X must be a manifold type.");
static const int N = traits<X>::dimension; BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified.");
BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type.");
typedef typename traits<X>::TangentVector TangentX;
// Prepare a tangent vector to perturb x with, only works for fixed size // Prepare a tangent vector to perturb x with, only works for fixed size
TangentX d; typename traits<X>::TangentVector d;
d.setZero(); d.setZero();
Eigen::Matrix<double,N,1> g; g.setZero(); // Can be fixed size Eigen::Matrix<double,N,1> g;
g.setZero();
for (int j = 0; j < N; j++) { for (int j = 0; j < N; j++) {
d(j) = delta; d(j) = delta;
double hxplus = h(traits<X>::Retract(x, d)); double hxplus = h(traits<X>::Retract(x, d));
@ -108,37 +109,34 @@ typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<do
* @param delta increment for numerical derivative * @param delta increment for numerical derivative
* Class Y is the output argument * Class Y is the output argument
* Class X is the input argument * Class X is the input argument
* int N is the dimension of the X input value if variable dimension type but known at test time
* @return m*n Jacobian computed via central differencing * @return m*n Jacobian computed via central differencing
*/ */
template<class Y, class X> template <class Y, class X, int N = traits<X>::dimension>
// TODO Should compute fixed-size matrix // TODO Should compute fixed-size matrix
typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::function<Y(const X&)> h, const X& x, typename internal::FixedSizeMatrix<Y, X>::type numericalDerivative11(
double delta = 1e-5) { boost::function<Y(const X&)> h, const X& x, double delta = 1e-5) {
typedef typename internal::FixedSizeMatrix<Y,X>::type Matrix; typedef typename internal::FixedSizeMatrix<Y,X>::type Matrix;
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value), BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type."); "Template argument Y must be a manifold type.");
typedef traits<Y> TraitsY; typedef traits<Y> TraitsY;
typedef typename TraitsY::TangentVector TangentY;
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X>::structure_category>::value), BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X>::structure_category>::value),
"Template argument X must be a manifold type."); "Template argument X must be a manifold type.");
static const int N = traits<X>::dimension; BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified.");
BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type.");
typedef traits<X> TraitsX; typedef traits<X> TraitsX;
typedef typename TraitsX::TangentVector TangentX;
// get value at x, and corresponding chart // get value at x, and corresponding chart
const Y hx = h(x); const Y hx = h(x);
// Bit of a hack for now to find number of rows // Bit of a hack for now to find number of rows
const TangentY zeroY = TraitsY::Local(hx, hx); const typename TraitsY::TangentVector zeroY = TraitsY::Local(hx, hx);
const size_t m = zeroY.size(); const size_t m = zeroY.size();
// Prepare a tangent vector to perturb x with, only works for fixed size // Prepare a tangent vector to perturb x with, only works for fixed size
TangentX dx; Eigen::Matrix<double, N, 1> dx;
dx.setZero(); dx.setZero();
// Fill in Jacobian H // Fill in Jacobian H
@ -146,9 +144,9 @@ typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::funct
const double factor = 1.0 / (2.0 * delta); const double factor = 1.0 / (2.0 * delta);
for (int j = 0; j < N; j++) { for (int j = 0; j < N; j++) {
dx(j) = delta; dx(j) = delta;
const TangentY dy1 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx))); const auto dy1 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
dx(j) = -delta; dx(j) = -delta;
const TangentY dy2 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx))); const auto dy2 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
dx(j) = 0; dx(j) = 0;
H.col(j) << (dy1 - dy2) * factor; H.col(j) << (dy1 - dy2) * factor;
} }

View File

@ -70,21 +70,20 @@ public:
Preconditioner() {} Preconditioner() {}
virtual ~Preconditioner() {} virtual ~Preconditioner() {}
/* Computation Interfaces */ /*
* Abstract interface for raw vectors. VectorValues is a speed bottleneck
 * and Yong-Dian has profiled preconditioners (outside GTSAM) with the
* three methods below. In GTSAM, unfortunately, we are still using the
* VectorValues methods called in iterative-inl.h
*/
/* implement x = L^{-1} y */ /// implement x = L^{-1} y
virtual void solve(const Vector& y, Vector &x) const = 0; virtual void solve(const Vector& y, Vector &x) const = 0;
// virtual void solve(const VectorValues& y, VectorValues &x) const = 0;
/* implement x = L^{-T} y */ /// implement x = L^{-T} y
virtual void transposeSolve(const Vector& y, Vector& x) const = 0; virtual void transposeSolve(const Vector& y, Vector& x) const = 0;
// virtual void transposeSolve(const VectorValues& y, VectorValues &x) const = 0;
// /* implement x = L^{-1} L^{-T} y */ /// build/factorize the preconditioner
// virtual void fullSolve(const Vector& y, Vector &x) const = 0;
// virtual void fullSolve(const VectorValues& y, VectorValues &x) const = 0;
/* build/factorize the preconditioner */
virtual void build( virtual void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
@ -113,14 +112,7 @@ public:
/* Computation Interfaces for raw vector */ /* Computation Interfaces for raw vector */
virtual void solve(const Vector& y, Vector &x) const { x = y; } virtual void solve(const Vector& y, Vector &x) const { x = y; }
// virtual void solve(const VectorValues& y, VectorValues& x) const { x = y; }
virtual void transposeSolve(const Vector& y, Vector& x) const { x = y; } virtual void transposeSolve(const Vector& y, Vector& x) const { x = y; }
// virtual void transposeSolve(const VectorValues& y, VectorValues& x) const { x = y; }
// virtual void fullSolve(const Vector& y, Vector &x) const { x = y; }
// virtual void fullSolve(const VectorValues& y, VectorValues& x) const { x = y; }
virtual void build( virtual void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
@ -145,8 +137,6 @@ public:
/* Computation Interfaces for raw vector */ /* Computation Interfaces for raw vector */
virtual void solve(const Vector& y, Vector &x) const; virtual void solve(const Vector& y, Vector &x) const;
virtual void transposeSolve(const Vector& y, Vector& x) const ; virtual void transposeSolve(const Vector& y, Vector& x) const ;
// virtual void fullSolve(const Vector& y, Vector &x) const ;
virtual void build( virtual void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,

View File

@ -52,17 +52,21 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
using namespace std; using std::cout;
using std::endl;
using std::vector;
using std::ostream;
namespace gtsam { namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
// Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
static GaussianFactorGraph::shared_ptr convertToJacobianFactors(const GaussianFactorGraph &gfg) { static GaussianFactorGraph::shared_ptr convertToJacobianFactors(const GaussianFactorGraph &gfg) {
GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph()); auto result = boost::make_shared<GaussianFactorGraph>();
for(const GaussianFactor::shared_ptr &gf: gfg) { for (const auto &factor : gfg) {
JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf); auto jf = boost::dynamic_pointer_cast<JacobianFactor>(factor);
if( !jf ) { if( !jf ) {
jf = boost::make_shared<JacobianFactor>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky) jf = boost::make_shared<JacobianFactor>(*factor);
} }
result->push_back(jf); result->push_back(jf);
} }
@ -70,20 +74,23 @@ static GaussianFactorGraph::shared_ptr convertToJacobianFactors(const GaussianFa
} }
/*****************************************************************************/ /*****************************************************************************/
static std::vector<size_t> iidSampler(const vector<double> &weight, const size_t n) { static vector<size_t> iidSampler(const vector<double> &weight, const size_t n) {
/* compute the sum of the weights */ /* compute the sum of the weights */
const double sum = std::accumulate(weight.begin(), weight.end(), 0.0); const double sum = std::accumulate(weight.begin(), weight.end(), 0.0);
if (sum==0.0) {
throw std::runtime_error("Weight vector has no non-zero weights");
}
/* make a normalized and accumulated version of the weight vector */ /* make a normalized and accumulated version of the weight vector */
const size_t m = weight.size(); const size_t m = weight.size();
vector<double> w; w.reserve(m); vector<double> cdf; cdf.reserve(m);
for ( size_t i = 0 ; i < m ; ++i ) { for ( size_t i = 0 ; i < m ; ++i ) {
w.push_back(weight[i]/sum); cdf.push_back(weight[i]/sum);
} }
vector<double> acc(m); vector<double> acc(m);
std::partial_sum(w.begin(),w.end(),acc.begin()); std::partial_sum(cdf.begin(),cdf.end(),acc.begin());
/* iid sample n times */ /* iid sample n times */
vector<size_t> result; result.reserve(n); vector<size_t> result; result.reserve(n);
@ -91,26 +98,26 @@ static std::vector<size_t> iidSampler(const vector<double> &weight, const size_t
for ( size_t i = 0 ; i < n ; ++i ) { for ( size_t i = 0 ; i < n ; ++i ) {
const double value = rand() / denominator; const double value = rand() / denominator;
/* binary search the interval containing "value" */ /* binary search the interval containing "value" */
vector<double>::iterator it = std::lower_bound(acc.begin(), acc.end(), value); const auto it = std::lower_bound(acc.begin(), acc.end(), value);
size_t idx = it - acc.begin(); const size_t idx = it - acc.begin();
result.push_back(idx); result.push_back(idx);
} }
return result; return result;
} }
/*****************************************************************************/ /*****************************************************************************/
vector<size_t> uniqueSampler(const vector<double> &weight, const size_t n) { static vector<size_t> UniqueSampler(const vector<double> &weight, const size_t n) {
const size_t m = weight.size(); const size_t m = weight.size();
if ( n > m ) throw std::invalid_argument("uniqueSampler: invalid input size"); if ( n > m ) throw std::invalid_argument("UniqueSampler: invalid input size");
vector<size_t> result; vector<size_t> result;
size_t count = 0; size_t count = 0;
std::vector<bool> touched(m, false); vector<bool> touched(m, false);
while ( count < n ) { while ( count < n ) {
std::vector<size_t> localIndices; localIndices.reserve(n-count); vector<size_t> localIndices; localIndices.reserve(n-count);
std::vector<double> localWeights; localWeights.reserve(n-count); vector<double> localWeights; localWeights.reserve(n-count);
/* collect data */ /* collect data */
for ( size_t i = 0 ; i < m ; ++i ) { for ( size_t i = 0 ; i < m ; ++i ) {
@ -134,16 +141,16 @@ vector<size_t> uniqueSampler(const vector<double> &weight, const size_t n) {
} }
/****************************************************************************/ /****************************************************************************/
Subgraph::Subgraph(const std::vector<size_t> &indices) { Subgraph::Subgraph(const vector<size_t> &indices) {
edges_.reserve(indices.size()); edges_.reserve(indices.size());
for ( const size_t &idx: indices ) { for ( const size_t &idx: indices ) {
edges_.push_back(SubgraphEdge(idx, 1.0)); edges_.emplace_back(idx, 1.0);
} }
} }
/****************************************************************************/ /****************************************************************************/
std::vector<size_t> Subgraph::edgeIndices() const { vector<size_t> Subgraph::edgeIndices() const {
std::vector<size_t> eid; eid.reserve(size()); vector<size_t> eid; eid.reserve(size());
for ( const SubgraphEdge &edge: edges_ ) { for ( const SubgraphEdge &edge: edges_ ) {
eid.push_back(edge.index_); eid.push_back(edge.index_);
} }
@ -169,7 +176,7 @@ Subgraph::shared_ptr Subgraph::load(const std::string &fn) {
} }
/****************************************************************************/ /****************************************************************************/
std::ostream &operator<<(std::ostream &os, const SubgraphEdge &edge) { ostream &operator<<(ostream &os, const SubgraphEdge &edge) {
if ( edge.weight() != 1.0 ) if ( edge.weight() != 1.0 )
os << edge.index() << "(" << std::setprecision(2) << edge.weight() << ")"; os << edge.index() << "(" << std::setprecision(2) << edge.weight() << ")";
else else
@ -178,7 +185,7 @@ std::ostream &operator<<(std::ostream &os, const SubgraphEdge &edge) {
} }
/****************************************************************************/ /****************************************************************************/
std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph) { ostream &operator<<(ostream &os, const Subgraph &subgraph) {
os << "Subgraph" << endl; os << "Subgraph" << endl;
for ( const SubgraphEdge &e: subgraph.edges() ) { for ( const SubgraphEdge &e: subgraph.edges() ) {
os << e << ", " ; os << e << ", " ;
@ -212,15 +219,15 @@ SubgraphBuilderParameters::Skeleton SubgraphBuilderParameters::skeletonTranslato
if (s == "NATURALCHAIN") return NATURALCHAIN; if (s == "NATURALCHAIN") return NATURALCHAIN;
else if (s == "BFS") return BFS; else if (s == "BFS") return BFS;
else if (s == "KRUSKAL") return KRUSKAL; else if (s == "KRUSKAL") return KRUSKAL;
throw invalid_argument("SubgraphBuilderParameters::skeletonTranslator undefined string " + s); throw std::invalid_argument("SubgraphBuilderParameters::skeletonTranslator undefined string " + s);
return KRUSKAL; return KRUSKAL;
} }
/****************************************************************/ /****************************************************************/
std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton w) { std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton s) {
if ( w == NATURALCHAIN )return "NATURALCHAIN"; if ( s == NATURALCHAIN ) return "NATURALCHAIN";
else if ( w == BFS ) return "BFS"; else if ( s == BFS ) return "BFS";
else if ( w == KRUSKAL )return "KRUSKAL"; else if ( s == KRUSKAL ) return "KRUSKAL";
else return "UNKNOWN"; else return "UNKNOWN";
} }
@ -231,7 +238,7 @@ SubgraphBuilderParameters::SkeletonWeight SubgraphBuilderParameters::skeletonWei
else if (s == "RHS") return RHS_2NORM; else if (s == "RHS") return RHS_2NORM;
else if (s == "LHS") return LHS_FNORM; else if (s == "LHS") return LHS_FNORM;
else if (s == "RANDOM") return RANDOM; else if (s == "RANDOM") return RANDOM;
throw invalid_argument("SubgraphBuilderParameters::skeletonWeightTranslator undefined string " + s); throw std::invalid_argument("SubgraphBuilderParameters::skeletonWeightTranslator undefined string " + s);
return EQUAL; return EQUAL;
} }
@ -245,12 +252,14 @@ std::string SubgraphBuilderParameters::skeletonWeightTranslator(SkeletonWeight w
} }
/****************************************************************/ /****************************************************************/
SubgraphBuilderParameters::AugmentationWeight SubgraphBuilderParameters::augmentationWeightTranslator(const std::string &src) { SubgraphBuilderParameters::AugmentationWeight
SubgraphBuilderParameters::augmentationWeightTranslator(
const std::string &src) {
std::string s = src; boost::algorithm::to_upper(s); std::string s = src; boost::algorithm::to_upper(s);
if (s == "SKELETON") return SKELETON; if (s == "SKELETON") return SKELETON;
// else if (s == "STRETCH") return STRETCH; // else if (s == "STRETCH") return STRETCH;
// else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH; // else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH;
throw invalid_argument("SubgraphBuilder::Parameters::augmentationWeightTranslator undefined string " + s); throw std::invalid_argument("SubgraphBuilder::Parameters::augmentationWeightTranslator undefined string " + s);
return SKELETON; return SKELETON;
} }
@ -263,7 +272,9 @@ std::string SubgraphBuilderParameters::augmentationWeightTranslator(Augmentation
} }
/****************************************************************/ /****************************************************************/
std::vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const { vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg,
const FastMap<Key, size_t> &ordering,
const vector<double> &weights) const {
const SubgraphBuilderParameters &p = parameters_; const SubgraphBuilderParameters &p = parameters_;
switch (p.skeleton_) { switch (p.skeleton_) {
case SubgraphBuilderParameters::NATURALCHAIN: case SubgraphBuilderParameters::NATURALCHAIN:
@ -273,21 +284,21 @@ std::vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg, c
return bfs(gfg); return bfs(gfg);
break; break;
case SubgraphBuilderParameters::KRUSKAL: case SubgraphBuilderParameters::KRUSKAL:
return kruskal(gfg, ordering, w); return kruskal(gfg, ordering, weights);
break; break;
default: default:
cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl; std::cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl;
break; break;
} }
return vector<size_t>(); return vector<size_t>();
} }
/****************************************************************/ /****************************************************************/
std::vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const { vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const {
std::vector<size_t> result ; vector<size_t> result ;
size_t idx = 0; size_t idx = 0;
for ( const GaussianFactor::shared_ptr &gf: gfg ) { for (const auto &factor : gfg) {
if ( gf->size() == 1 ) { if ( factor->size() == 1 ) {
result.push_back(idx); result.push_back(idx);
} }
idx++; idx++;
@ -296,8 +307,8 @@ std::vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const
} }
/****************************************************************/ /****************************************************************/
std::vector<size_t> SubgraphBuilder::natural_chain(const GaussianFactorGraph &gfg) const { vector<size_t> SubgraphBuilder::natural_chain(const GaussianFactorGraph &gfg) const {
std::vector<size_t> result ; vector<size_t> result ;
size_t idx = 0; size_t idx = 0;
for ( const GaussianFactor::shared_ptr &gf: gfg ) { for ( const GaussianFactor::shared_ptr &gf: gfg ) {
if ( gf->size() == 2 ) { if ( gf->size() == 2 ) {
@ -311,7 +322,7 @@ std::vector<size_t> SubgraphBuilder::natural_chain(const GaussianFactorGraph &gf
} }
/****************************************************************/ /****************************************************************/
std::vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const { vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
const VariableIndex variableIndex(gfg); const VariableIndex variableIndex(gfg);
/* start from the first key of the first factor */ /* start from the first key of the first factor */
Key seed = gfg[0]->keys()[0]; Key seed = gfg[0]->keys()[0];
@ -319,7 +330,7 @@ std::vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
const size_t n = variableIndex.size(); const size_t n = variableIndex.size();
/* each vertex has self as the predecessor */ /* each vertex has self as the predecessor */
std::vector<size_t> result; vector<size_t> result;
result.reserve(n-1); result.reserve(n-1);
/* Initialize */ /* Initialize */
@ -347,30 +358,31 @@ std::vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
} }
/****************************************************************/ /****************************************************************/
std::vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const { vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
const FastMap<Key, size_t> &ordering,
const vector<double> &weights) const {
const VariableIndex variableIndex(gfg); const VariableIndex variableIndex(gfg);
const size_t n = variableIndex.size(); const size_t n = variableIndex.size();
const vector<size_t> idx = sort_idx(w) ; const vector<size_t> idx = sort_idx(weights) ;
/* initialize buffer */ /* initialize buffer */
vector<size_t> result; vector<size_t> result;
result.reserve(n-1); result.reserve(n-1);
  // container for edges sorted in ascending order   // container for edges sorted in ascending order
DSFVector D(n) ; DSFVector dsf(n);
size_t count = 0 ; double sum = 0.0 ; size_t count = 0 ; double sum = 0.0 ;
for (const size_t id: idx) { for (const size_t id: idx) {
const GaussianFactor &gf = *gfg[id]; const GaussianFactor &gf = *gfg[id];
if ( gf.keys().size() != 2 ) continue; const auto keys = gf.keys();
const size_t u = ordering.find(gf.keys()[0])->second, if ( keys.size() != 2 ) continue;
u_root = D.find(u), const size_t u = ordering.find(keys[0])->second,
v = ordering.find(gf.keys()[1])->second, v = ordering.find(keys[1])->second;
v_root = D.find(v) ; if ( dsf.find(u) != dsf.find(v) ) {
if ( u_root != v_root ) { dsf.merge(u, v) ;
D.merge(u_root, v_root) ;
result.push_back(id) ; result.push_back(id) ;
sum += w[id] ; sum += weights[id] ;
if ( ++count == n-1 ) break ; if ( ++count == n-1 ) break ;
} }
} }
@ -378,33 +390,40 @@ std::vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, con
} }
/****************************************************************/ /****************************************************************/
std::vector<size_t> SubgraphBuilder::sample(const std::vector<double> &weights, const size_t t) const { vector<size_t> SubgraphBuilder::sample(const vector<double> &weights, const size_t t) const {
return uniqueSampler(weights, t); return UniqueSampler(weights, t);
} }
/****************************************************************/ /****************************************************************/
Subgraph::shared_ptr SubgraphBuilder::operator() (const GaussianFactorGraph &gfg) const { Subgraph::shared_ptr SubgraphBuilder::operator() (const GaussianFactorGraph &gfg) const {
const SubgraphBuilderParameters &p = parameters_; const auto &p = parameters_;
const Ordering inverse_ordering = Ordering::Natural(gfg); const auto inverse_ordering = Ordering::Natural(gfg);
const FastMap<Key, size_t> forward_ordering = inverse_ordering.invert(); const FastMap<Key, size_t> forward_ordering = inverse_ordering.invert();
const size_t n = inverse_ordering.size(), t = n * p.complexity_ ; const size_t n = inverse_ordering.size(), m = gfg.size();
vector<double> w = weights(gfg); // Make sure the subgraph preconditioner does not include more than half of
const vector<size_t> tree = buildTree(gfg, forward_ordering, w); // the edges beyond the spanning tree, or we might as well solve the whole thing.
size_t numExtraEdges = n * p.complexity_;
const size_t numRemaining = m - (n - 1);
numExtraEdges = std::min(numExtraEdges, numRemaining/2);
/* sanity check */ // Calculate weights
vector<double> weights = this->weights(gfg);
// Build spanning tree.
const vector<size_t> tree = buildTree(gfg, forward_ordering, weights);
if ( tree.size() != n-1 ) { if ( tree.size() != n-1 ) {
throw runtime_error("SubgraphBuilder::operator() tree.size() != n-1 failed "); throw std::runtime_error("SubgraphBuilder::operator() failure: tree.size() != n-1");
} }
/* down weight the tree edges to zero */ // Downweight the tree edges to zero.
for ( const size_t id: tree ) { for ( const size_t id: tree ) {
w[id] = 0.0; weights[id] = 0.0;
} }
/* decide how many edges to augment */ /* decide how many edges to augment */
std::vector<size_t> offTree = sample(w, t); vector<size_t> offTree = sample(weights, numExtraEdges);
vector<size_t> subgraph = unary(gfg); vector<size_t> subgraph = unary(gfg);
subgraph.insert(subgraph.end(), tree.begin(), tree.end()); subgraph.insert(subgraph.end(), tree.begin(), tree.end());
@ -450,7 +469,7 @@ SubgraphBuilder::Weights SubgraphBuilder::weights(const GaussianFactorGraph &gfg
break; break;
default: default:
throw invalid_argument("SubgraphBuilder::weights: undefined weight scheme "); throw std::invalid_argument("SubgraphBuilder::weights: undefined weight scheme ");
break; break;
} }
} }
@ -495,7 +514,6 @@ VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const {
/* ************************************************************************* */ /* ************************************************************************* */
// Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y] // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
Errors SubgraphPreconditioner::operator*(const VectorValues& y) const { Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
Errors e(y); Errors e(y);
VectorValues x = Rc1()->backSubstitute(y); /* x=inv(R1)*y */ VectorValues x = Rc1()->backSubstitute(y); /* x=inv(R1)*y */
Errors e2 = *Ab2() * x; /* A2*x */ Errors e2 = *Ab2() * x; /* A2*x */
@ -508,8 +526,10 @@ Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const { void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const {
Errors::iterator ei = e.begin(); Errors::iterator ei = e.begin();
for (size_t i = 0; i < y.size(); ++i, ++ei) for(const auto& key_value: y) {
*ei = y[i]; *ei = key_value.second;
++ei;
}
// Add A2 contribution // Add A2 contribution
VectorValues x = Rc1()->backSubstitute(y); // x=inv(R1)*y VectorValues x = Rc1()->backSubstitute(y); // x=inv(R1)*y
@ -522,8 +542,10 @@ VectorValues SubgraphPreconditioner::operator^(const Errors& e) const {
Errors::const_iterator it = e.begin(); Errors::const_iterator it = e.begin();
VectorValues y = zero(); VectorValues y = zero();
for (size_t i = 0; i < y.size(); ++i, ++it) for(auto& key_value: y) {
y[i] = *it; key_value.second = *it;
++it;
}
transposeMultiplyAdd2(1.0, it, e.end(), y); transposeMultiplyAdd2(1.0, it, e.end(), y);
return y; return y;
} }
@ -534,9 +556,10 @@ void SubgraphPreconditioner::transposeMultiplyAdd
(double alpha, const Errors& e, VectorValues& y) const { (double alpha, const Errors& e, VectorValues& y) const {
Errors::const_iterator it = e.begin(); Errors::const_iterator it = e.begin();
for (size_t i = 0; i < y.size(); ++i, ++it) { for(auto& key_value: y) {
const Vector& ei = *it; const Vector& ei = *it;
axpy(alpha, ei, y[i]); axpy(alpha, ei, key_value.second);
++it;
} }
transposeMultiplyAdd2(alpha, it, e.end(), y); transposeMultiplyAdd2(alpha, it, e.end(), y);
} }
@ -563,47 +586,52 @@ void SubgraphPreconditioner::print(const std::string& s) const {
} }
/*****************************************************************************/ /*****************************************************************************/
void SubgraphPreconditioner::solve(const Vector& y, Vector &x) const void SubgraphPreconditioner::solve(const Vector &y, Vector &x) const {
{ assert(x.size() == y.size());
/* copy first */
std::copy(y.data(), y.data() + y.rows(), x.data());
/* in place back substitute */ /* back substitute */
for (auto cg: boost::adaptors::reverse(*Rc1_)) { for (const auto &cg : boost::adaptors::reverse(*Rc1_)) {
/* collect a subvector of x that consists of the parents of cg (S) */ /* collect a subvector of x that consists of the parents of cg (S) */
const Vector xParent = getSubvector(x, keyInfo_, KeyVector(cg->beginParents(), cg->endParents())); const KeyVector parentKeys(cg->beginParents(), cg->endParents());
const Vector rhsFrontal = getSubvector(x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals())); const KeyVector frontalKeys(cg->beginFrontals(), cg->endFrontals());
const Vector xParent = getSubvector(x, keyInfo_, parentKeys);
const Vector rhsFrontal = getSubvector(y, keyInfo_, frontalKeys);
/* compute the solution for the current pivot */ /* compute the solution for the current pivot */
const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().solve(rhsFrontal - cg->get_S() * xParent); const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().solve(
rhsFrontal - cg->get_S() * xParent);
/* assign subvector of sol to the frontal variables */ /* assign subvector of sol to the frontal variables */
setSubvector(solFrontal, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()), x); setSubvector(solFrontal, keyInfo_, frontalKeys, x);
} }
} }
/*****************************************************************************/ /*****************************************************************************/
void SubgraphPreconditioner::transposeSolve(const Vector& y, Vector& x) const void SubgraphPreconditioner::transposeSolve(const Vector &y, Vector &x) const {
{
/* copy first */ /* copy first */
assert(x.size() == y.size());
std::copy(y.data(), y.data() + y.rows(), x.data()); std::copy(y.data(), y.data() + y.rows(), x.data());
/* in place back substitute */ /* in place back substitute */
for(const boost::shared_ptr<GaussianConditional> & cg: *Rc1_) { for (const auto &cg : *Rc1_) {
const Vector rhsFrontal = getSubvector(x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals())); const Vector rhsFrontal = getSubvector(
// const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().transpose().solve(rhsFrontal); x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()));
const Vector solFrontal = cg->get_R().transpose().triangularView<Eigen::Lower>().solve(rhsFrontal); const Vector solFrontal =
cg->get_R().transpose().triangularView<Eigen::Lower>().solve(
rhsFrontal);
// Check for indeterminate solution // Check for indeterminate solution
if ( solFrontal.hasNaN()) throw IndeterminantLinearSystemException(cg->keys().front()); if (solFrontal.hasNaN())
throw IndeterminantLinearSystemException(cg->keys().front());
/* assign subvector of sol to the frontal variables */ /* assign subvector of sol to the frontal variables */
setSubvector(solFrontal, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()), x); setSubvector(solFrontal, keyInfo_,
KeyVector(cg->beginFrontals(), cg->endFrontals()), x);
/* subtract from parent variables */ /* subtract from parent variables */
for (GaussianConditional::const_iterator it = cg->beginParents(); it != cg->endParents(); it++) { for (auto it = cg->beginParents(); it != cg->endParents(); it++) {
KeyInfo::const_iterator it2 = keyInfo_.find(*it); const KeyInfoEntry &info = keyInfo_.find(*it)->second;
Eigen::Map<Vector> rhsParent(x.data()+it2->second.colstart(), it2->second.dim(), 1); Eigen::Map<Vector> rhsParent(x.data() + info.colstart(), info.dim(), 1);
rhsParent -= Matrix(cg->getA(it)).transpose() * solFrontal; rhsParent -= Matrix(cg->getA(it)).transpose() * solFrontal;
} }
} }
@ -626,24 +654,23 @@ void SubgraphPreconditioner::build(const GaussianFactorGraph &gfg, const KeyInfo
} }
/*****************************************************************************/ /*****************************************************************************/
Vector getSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys) { Vector getSubvector(const Vector &src, const KeyInfo &keyInfo,
const KeyVector &keys) {
/* a cache of starting index and dim */ /* a cache of starting index and dim */
typedef vector<pair<size_t, size_t> > Cache; vector<std::pair<size_t, size_t> > cache;
Cache cache;
/* figure out dimension by traversing the keys */ /* figure out dimension by traversing the keys */
size_t d = 0; size_t dim = 0;
for (const Key &key : keys) { for (const Key &key : keys) {
const KeyInfoEntry &entry = keyInfo.find(key)->second; const KeyInfoEntry &entry = keyInfo.find(key)->second;
cache.push_back(make_pair(entry.colstart(), entry.dim())); cache.emplace_back(entry.colstart(), entry.dim());
d += entry.dim(); dim += entry.dim();
} }
/* use the cache to fill the result */ /* use the cache to fill the result */
Vector result = Vector::Zero(d, 1); Vector result = Vector::Zero(dim);
size_t idx = 0; size_t idx = 0;
for ( const Cache::value_type &p: cache ) { for (const auto &p : cache) {
result.segment(idx, p.second) = src.segment(p.first, p.second); result.segment(idx, p.second) = src.segment(p.first, p.second);
idx += p.second; idx += p.second;
} }
@ -653,7 +680,6 @@ Vector getSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &
/*****************************************************************************/ /*****************************************************************************/
void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys, Vector &dst) { void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys, Vector &dst) {
/* use the cache */
size_t idx = 0; size_t idx = 0;
for ( const Key &key: keys ) { for ( const Key &key: keys ) {
const KeyInfoEntry &entry = keyInfo.find(key)->second; const KeyInfoEntry &entry = keyInfo.find(key)->second;
@ -663,10 +689,10 @@ void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &ke
} }
/*****************************************************************************/ /*****************************************************************************/
boost::shared_ptr<GaussianFactorGraph> GaussianFactorGraph::shared_ptr buildFactorSubgraph(
buildFactorSubgraph(const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone) { const GaussianFactorGraph &gfg, const Subgraph &subgraph,
const bool clone) {
GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph()); auto result = boost::make_shared<GaussianFactorGraph>();
result->reserve(subgraph.size()); result->reserve(subgraph.size());
for ( const SubgraphEdge &e: subgraph ) { for ( const SubgraphEdge &e: subgraph ) {
const size_t idx = e.index(); const size_t idx = e.index();

View File

@ -133,7 +133,7 @@ namespace gtsam {
// GENERALIZED_STRETCH /* the generalized stretch defined in jian2013iros */ // GENERALIZED_STRETCH /* the generalized stretch defined in jian2013iros */
} augmentationWeight_ ; } augmentationWeight_ ;
double complexity_; double complexity_; /* factor multiplied with n, yields number of extra edges. */
SubgraphBuilderParameters() SubgraphBuilderParameters()
: skeleton_(KRUSKAL), skeletonWeight_(RANDOM), augmentationWeight_(SKELETON), complexity_(1.0) {} : skeleton_(KRUSKAL), skeletonWeight_(RANDOM), augmentationWeight_(SKELETON), complexity_(1.0) {}
@ -145,7 +145,7 @@ namespace gtsam {
friend std::ostream& operator<<(std::ostream &os, const PreconditionerParameters &p); friend std::ostream& operator<<(std::ostream &os, const PreconditionerParameters &p);
static Skeleton skeletonTranslator(const std::string &s); static Skeleton skeletonTranslator(const std::string &s);
static std::string skeletonTranslator(Skeleton w); static std::string skeletonTranslator(Skeleton s);
static SkeletonWeight skeletonWeightTranslator(const std::string &s); static SkeletonWeight skeletonWeightTranslator(const std::string &s);
static std::string skeletonWeightTranslator(SkeletonWeight w); static std::string skeletonWeightTranslator(SkeletonWeight w);
static AugmentationWeight augmentationWeightTranslator(const std::string &s); static AugmentationWeight augmentationWeightTranslator(const std::string &s);
@ -170,7 +170,7 @@ namespace gtsam {
std::vector<size_t> unary(const GaussianFactorGraph &gfg) const ; std::vector<size_t> unary(const GaussianFactorGraph &gfg) const ;
std::vector<size_t> natural_chain(const GaussianFactorGraph &gfg) const ; std::vector<size_t> natural_chain(const GaussianFactorGraph &gfg) const ;
std::vector<size_t> bfs(const GaussianFactorGraph &gfg) const ; std::vector<size_t> bfs(const GaussianFactorGraph &gfg) const ;
std::vector<size_t> kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const ; std::vector<size_t> kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &weights) const ;
std::vector<size_t> sample(const std::vector<double> &weights, const size_t t) const ; std::vector<size_t> sample(const std::vector<double> &weights, const size_t t) const ;
Weights weights(const GaussianFactorGraph &gfg) const; Weights weights(const GaussianFactorGraph &gfg) const;
SubgraphBuilderParameters parameters_; SubgraphBuilderParameters parameters_;
@ -249,8 +249,8 @@ namespace gtsam {
/* A zero VectorValues with the structure of xbar */ /* A zero VectorValues with the structure of xbar */
VectorValues zero() const { VectorValues zero() const {
VectorValues V(VectorValues::Zero(*xbar_)); assert(xbar_);
return V ; return VectorValues::Zero(*xbar_);
} }
/** /**
@ -285,15 +285,18 @@ namespace gtsam {
/*****************************************************************************/ /*****************************************************************************/
/* implement virtual functions of Preconditioner */ /* implement virtual functions of Preconditioner */
/* Computation Interfaces for Vector */ /// implement x = R^{-1} y
virtual void solve(const Vector& y, Vector &x) const; void solve(const Vector& y, Vector &x) const override;
virtual void transposeSolve(const Vector& y, Vector& x) const ;
virtual void build( /// implement x = R^{-T} y
void transposeSolve(const Vector& y, Vector& x) const override;
/// build/factorize the preconditioner
void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
const std::map<Key,Vector> &lambda const std::map<Key,Vector> &lambda
) ; ) override;
/*****************************************************************************/ /*****************************************************************************/
}; };

View File

@ -21,6 +21,8 @@
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/base/numericalDerivative.h> #include <gtsam/base/numericalDerivative.h>
#include <CppUnitLite/TestHarness.h>
#include <boost/tuple/tuple.hpp>
#include <boost/assign/list_of.hpp> #include <boost/assign/list_of.hpp>
#include <boost/assign/std/list.hpp> // for operator += #include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign; using namespace boost::assign;
@ -28,13 +30,11 @@ using namespace boost::assign;
// STL/C++ // STL/C++
#include <iostream> #include <iostream>
#include <sstream> #include <sstream>
#include <CppUnitLite/TestHarness.h>
#include <boost/tuple/tuple.hpp>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
static const Key _x_=0, _y_=1; static const Key _x_ = 11, _y_ = 22, _z_ = 33;
static GaussianBayesNet smallBayesNet = static GaussianBayesNet smallBayesNet =
list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1))( list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1))(
@ -42,9 +42,9 @@ static GaussianBayesNet smallBayesNet =
static GaussianBayesNet noisyBayesNet = static GaussianBayesNet noisyBayesNet =
list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1, list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1,
noiseModel::Diagonal::Sigmas(Vector1::Constant(2))))( noiseModel::Isotropic::Sigma(1, 2.0)))(
GaussianConditional(_y_, Vector1::Constant(5), I_1x1, GaussianConditional(_y_, Vector1::Constant(5), I_1x1,
noiseModel::Diagonal::Sigmas(Vector1::Constant(3)))); noiseModel::Isotropic::Sigma(1, 3.0)));
/* ************************************************************************* */ /* ************************************************************************* */
TEST( GaussianBayesNet, Matrix ) TEST( GaussianBayesNet, Matrix )
@ -140,11 +140,33 @@ TEST( GaussianBayesNet, optimize3 )
TEST(GaussianBayesNet, ordering) TEST(GaussianBayesNet, ordering)
{ {
Ordering expected; Ordering expected;
expected += 0, 1; expected += _x_, _y_;
const auto actual = noisyBayesNet.ordering(); const auto actual = noisyBayesNet.ordering();
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
} }
/* ************************************************************************* */
// Stress test for GaussianBayesNet::matrix: for every permutation of the
// three keys, the (R, d) pair returned for that ordering must satisfy
// R * x = d with x equal to the solution from optimize(), i.e.
// expected.vector(ordering) == R.inverse() * d.
TEST( GaussianBayesNet, MatrixStress )
{
GaussianBayesNet bn;
using GC = GaussianConditional;
// A chain of conditionals over 2-dimensional variables: p(x|y,z) p(y|z) p(z).
bn.emplace_shared<GC>(_x_, Vector2(1, 2), 1 * I_2x2, _y_, 2 * I_2x2, _z_, 3 * I_2x2);
bn.emplace_shared<GC>(_y_, Vector2(3, 4), 4 * I_2x2, _z_, 5 * I_2x2);
bn.emplace_shared<GC>(_z_, Vector2(5, 6), 6 * I_2x2);
// Ground-truth solution, independent of any ordering.
const VectorValues expected = bn.optimize();
// Exercise all 6 permutations of {x, y, z}.
for (const auto keys :
{KeyVector({_x_, _y_, _z_}), KeyVector({_x_, _z_, _y_}),
KeyVector({_y_, _x_, _z_}), KeyVector({_y_, _z_, _x_}),
KeyVector({_z_, _x_, _y_}), KeyVector({_z_, _y_, _x_})}) {
const Ordering ordering(keys);
Matrix R;
Vector d;
boost::tie(R, d) = bn.matrix(ordering);
// Solving the dense system must reproduce the Bayes-net solution
// stacked in the same ordering.
EXPECT(assert_equal(expected.vector(ordering), R.inverse() * d));
}
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST( GaussianBayesNet, backSubstituteTranspose ) TEST( GaussianBayesNet, backSubstituteTranspose )
{ {

View File

@ -66,6 +66,7 @@ string findExampleDataFile(const string& name) {
namesToSearch.push_back(name + ".graph"); namesToSearch.push_back(name + ".graph");
namesToSearch.push_back(name + ".txt"); namesToSearch.push_back(name + ".txt");
namesToSearch.push_back(name + ".out"); namesToSearch.push_back(name + ".out");
namesToSearch.push_back(name + ".xml");
// Find first name that exists // Find first name that exists
for(const fs::path& root: rootsToSearch) { for(const fs::path& root: rootsToSearch) {

View File

@ -15,31 +15,36 @@
* @author Frank Dellaert * @author Frank Dellaert
**/ **/
#include <CppUnitLite/TestHarness.h>
#if 0
#include <tests/smallExample.h> #include <tests/smallExample.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/inference/Symbol.h> #include <gtsam/inference/Symbol.h>
#include <gtsam/linear/iterative.h> #include <gtsam/linear/GaussianEliminationTree.h>
#include <gtsam/linear/GaussianFactorGraph.h> #include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/SubgraphPreconditioner.h> #include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/inference/Ordering.h> #include <gtsam/linear/iterative.h>
#include <gtsam/base/numericalDerivative.h> #include <gtsam/slam/dataset.h>
#include <gtsam/symbolic/SymbolicFactorGraph.h>
#include <boost/tuple/tuple.hpp> #include <CppUnitLite/TestHarness.h>
#include <boost/archive/xml_iarchive.hpp>
#include <boost/assign/std/list.hpp> #include <boost/assign/std/list.hpp>
#include <boost/range/adaptor/reversed.hpp>
#include <boost/serialization/export.hpp>
#include <boost/tuple/tuple.hpp>
using namespace boost::assign; using namespace boost::assign;
#include <fstream>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
using namespace example; using namespace example;
// define keys // define keys
// Create key for simulated planar graph // Create key for simulated planar graph
Symbol key(int x, int y) { Symbol key(int x, int y) { return symbol_shorthand::X(1000 * x + y); }
return symbol_shorthand::X(1000*x+y);
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST(SubgraphPreconditioner, planarOrdering) { TEST(SubgraphPreconditioner, planarOrdering) {
@ -49,7 +54,7 @@ TEST( SubgraphPreconditioner, planarOrdering ) {
key(3, 3), key(2, 3), key(1, 3), key(3, 3), key(2, 3), key(1, 3),
key(3, 2), key(2, 2), key(1, 2), key(3, 2), key(2, 2), key(1, 2),
key(3, 1), key(2, 1), key(1, 1); key(3, 1), key(2, 1), key(1, 1);
CHECK(assert_equal(expected,ordering)); EXPECT(assert_equal(expected, ordering));
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -62,8 +67,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( SubgraphPreconditioner, planarGraph ) TEST(SubgraphPreconditioner, planarGraph) {
{
// Check planar graph construction // Check planar graph construction
GaussianFactorGraph A; GaussianFactorGraph A;
VectorValues xtrue; VectorValues xtrue;
@ -73,152 +77,240 @@ TEST( SubgraphPreconditioner, planarGraph )
DOUBLES_EQUAL(0, error(A, xtrue), 1e-9); // check zero error for xtrue DOUBLES_EQUAL(0, error(A, xtrue), 1e-9); // check zero error for xtrue
// Check that xtrue is optimal // Check that xtrue is optimal
GaussianBayesNet::shared_ptr R1 = GaussianSequentialSolver(A).eliminate(); GaussianBayesNet::shared_ptr R1 = A.eliminateSequential();
VectorValues actual = optimize(*R1); VectorValues actual = R1->optimize();
CHECK(assert_equal(xtrue,actual)); EXPECT(assert_equal(xtrue, actual));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( SubgraphPreconditioner, splitOffPlanarTree ) TEST(SubgraphPreconditioner, splitOffPlanarTree) {
{
// Build a planar graph // Build a planar graph
GaussianFactorGraph A; GaussianFactorGraph A;
VectorValues xtrue; VectorValues xtrue;
boost::tie(A, xtrue) = planarGraph(3); boost::tie(A, xtrue) = planarGraph(3);
// Get the spanning tree and constraints, and check their sizes // Get the spanning tree and constraints, and check their sizes
GaussianFactorGraph T, C; GaussianFactorGraph::shared_ptr T, C;
boost::tie(T, C) = splitOffPlanarTree(3, A); boost::tie(T, C) = splitOffPlanarTree(3, A);
LONGS_EQUAL(9,T.size()); LONGS_EQUAL(9, T->size());
LONGS_EQUAL(4,C.size()); LONGS_EQUAL(4, C->size());
// Check that the tree can be solved to give the ground xtrue // Check that the tree can be solved to give the ground xtrue
GaussianBayesNet::shared_ptr R1 = GaussianSequentialSolver(T).eliminate(); GaussianBayesNet::shared_ptr R1 = T->eliminateSequential();
VectorValues xbar = optimize(*R1); VectorValues xbar = R1->optimize();
CHECK(assert_equal(xtrue,xbar)); EXPECT(assert_equal(xtrue, xbar));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(SubgraphPreconditioner, system) {
TEST( SubgraphPreconditioner, system )
{
// Build a planar graph // Build a planar graph
GaussianFactorGraph Ab; GaussianFactorGraph Ab;
VectorValues xtrue; VectorValues xtrue;
size_t N = 3; size_t N = 3;
boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b
// Get the spanning tree and corresponding ordering // Get the spanning tree and remaining graph
GaussianFactorGraph Ab1_, Ab2_; // A1*x-b1 and A2*x-b2 GaussianFactorGraph::shared_ptr Ab1, Ab2; // A1*x-b1 and A2*x-b2
boost::tie(Ab1_, Ab2_) = splitOffPlanarTree(N, Ab); boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab);
SubgraphPreconditioner::sharedFG Ab1(new GaussianFactorGraph(Ab1_));
SubgraphPreconditioner::sharedFG Ab2(new GaussianFactorGraph(Ab2_));
// Eliminate the spanning tree to build a prior // Eliminate the spanning tree to build a prior
SubgraphPreconditioner::sharedBayesNet Rc1 = GaussianSequentialSolver(Ab1_).eliminate(); // R1*x-c1 const Ordering ord = planarOrdering(N);
VectorValues xbar = optimize(*Rc1); // xbar = inv(R1)*c1 auto Rc1 = Ab1->eliminateSequential(ord); // R1*x-c1
VectorValues xbar = Rc1->optimize(); // xbar = inv(R1)*c1
// Create Subgraph-preconditioned system // Create Subgraph-preconditioned system
VectorValues::shared_ptr xbarShared(new VectorValues(xbar)); // TODO: horrible VectorValues::shared_ptr xbarShared(
SubgraphPreconditioner system(Ab2, Rc1, xbarShared); new VectorValues(xbar)); // TODO: horrible
const SubgraphPreconditioner system(Ab2, Rc1, xbarShared);
// Create zero config // Get corresponding matrices for tests. Add dummy factors to Ab2 to make
VectorValues zeros = VectorValues::Zero(xbar); // sure it works with the ordering.
Ordering ordering = Rc1->ordering(); // not ord in general!
Ab2->add(key(1, 1), Z_2x2, Z_2x1);
Ab2->add(key(1, 2), Z_2x2, Z_2x1);
Ab2->add(key(1, 3), Z_2x2, Z_2x1);
Matrix A, A1, A2;
Vector b, b1, b2;
std::tie(A, b) = Ab.jacobian(ordering);
std::tie(A1, b1) = Ab1->jacobian(ordering);
std::tie(A2, b2) = Ab2->jacobian(ordering);
Matrix R1 = Rc1->matrix(ordering).first;
Matrix Abar(13 * 2, 9 * 2);
Abar.topRows(9 * 2) = Matrix::Identity(9 * 2, 9 * 2);
Abar.bottomRows(8) = A2.topRows(8) * R1.inverse();
// Helper function to vectorize in correct order, which is the order in which
// we eliminated the spanning tree.
auto vec = [ordering](const VectorValues& x) { return x.vector(ordering); };
// Set up y0 as all zeros // Set up y0 as all zeros
VectorValues y0 = zeros; const VectorValues y0 = system.zero();
// y1 = perturbed y0 // y1 = perturbed y0
VectorValues y1 = zeros; VectorValues y1 = system.zero();
y1[1] = Vector2(1.0, -1.0); y1[key(3, 3)] = Vector2(1.0, -1.0);
// Check backSubstituteTranspose works with R1
VectorValues actual = Rc1->backSubstituteTranspose(y1);
Vector expected = R1.transpose().inverse() * vec(y1);
EXPECT(assert_equal(expected, vec(actual)));
// Check corresponding x values // Check corresponding x values
VectorValues expected_x1 = xtrue, x1 = system.x(y1); // for y = 0, we get xbar:
expected_x1[1] = Vector2(2.01, 2.99); EXPECT(assert_equal(xbar, system.x(y0)));
expected_x1[0] = Vector2(3.01, 2.99); // for non-zero y, answer is x = xbar + inv(R1)*y
CHECK(assert_equal(xtrue, system.x(y0))); const Vector expected_x1 = vec(xbar) + R1.inverse() * vec(y1);
CHECK(assert_equal(expected_x1,system.x(y1))); const VectorValues x1 = system.x(y1);
EXPECT(assert_equal(expected_x1, vec(x1)));
// Check errors // Check errors
DOUBLES_EQUAL(0,error(Ab,xtrue),1e-9); DOUBLES_EQUAL(0, error(Ab, xbar), 1e-9);
DOUBLES_EQUAL(3,error(Ab,x1),1e-9); DOUBLES_EQUAL(0, system.error(y0), 1e-9);
DOUBLES_EQUAL(0,error(system,y0),1e-9); DOUBLES_EQUAL(2, error(Ab, x1), 1e-9);
DOUBLES_EQUAL(3,error(system,y1),1e-9); DOUBLES_EQUAL(2, system.error(y1), 1e-9);
// Test gradient in x // Check that transposeMultiplyAdd <=> y += alpha * Abar' * e
VectorValues expected_gx0 = zeros; // We check for e1 =[1;0] and e2=[0;1] corresponding to T and C
VectorValues expected_gx1 = zeros; const double alpha = 0.5;
CHECK(assert_equal(expected_gx0,gradient(Ab,xtrue))); Errors e1, e2;
expected_gx1[2] = Vector2(-100., 100.); for (size_t i = 0; i < 13; i++) {
expected_gx1[4] = Vector2(-100., 100.); e1 += i < 9 ? Vector2(1, 1) : Vector2(0, 0);
expected_gx1[1] = Vector2(200., -200.); e2 += i >= 9 ? Vector2(1, 1) : Vector2(0, 0);
expected_gx1[3] = Vector2(-100., 100.); }
expected_gx1[0] = Vector2(100., -100.); Vector ee1(13 * 2), ee2(13 * 2);
CHECK(assert_equal(expected_gx1,gradient(Ab,x1))); ee1 << Vector::Ones(9 * 2), Vector::Zero(4 * 2);
ee2 << Vector::Zero(9 * 2), Vector::Ones(4 * 2);
// Check transposeMultiplyAdd for e1
VectorValues y = system.zero();
system.transposeMultiplyAdd(alpha, e1, y);
Vector expected_y = alpha * Abar.transpose() * ee1;
EXPECT(assert_equal(expected_y, vec(y)));
// Check transposeMultiplyAdd for e2
y = system.zero();
system.transposeMultiplyAdd(alpha, e2, y);
expected_y = alpha * Abar.transpose() * ee2;
EXPECT(assert_equal(expected_y, vec(y)));
// Test gradient in y // Test gradient in y
VectorValues expected_gy0 = zeros; auto g = system.gradient(y0);
VectorValues expected_gy1 = zeros; Vector expected_g = Vector::Zero(18);
expected_gy1[2] = Vector2(2., -2.); EXPECT(assert_equal(expected_g, vec(g)));
expected_gy1[4] = Vector2(-2., 2.);
expected_gy1[1] = Vector2(3., -3.);
expected_gy1[3] = Vector2(-1., 1.);
expected_gy1[0] = Vector2(1., -1.);
CHECK(assert_equal(expected_gy0,gradient(system,y0)));
CHECK(assert_equal(expected_gy1,gradient(system,y1)));
// Check it numerically for good measure
// TODO use boost::bind(&SubgraphPreconditioner::error,&system,_1)
// Vector numerical_g1 = numericalGradient<VectorValues> (error, y1, 0.001);
// Vector expected_g1 = (Vector(18) << 0., 0., 0., 0., 2., -2., 0., 0., -2., 2.,
// 3., -3., 0., 0., -1., 1., 1., -1.);
// CHECK(assert_equal(expected_g1,numerical_g1));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( SubgraphPreconditioner, conjugateGradients ) BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor");
{
// Read from XML file
// Deserialize a GaussianFactorGraph from one of the example-data XML files.
// Looks the file up via findExampleDataFile and throws if it cannot be opened.
static GaussianFactorGraph read(const string& name) {
  const auto inputFile = findExampleDataFile(name);
  ifstream stream(inputFile);
  if (!stream.is_open()) {
    throw runtime_error("Cannot find file " + inputFile);
  }
  boost::archive::xml_iarchive archive(stream);
  GaussianFactorGraph graph;
  // The archive entry was serialized under the name "graph".
  archive >> boost::serialization::make_nvp("graph", graph);
  return graph;
}
// End-to-end check that SubgraphPreconditioner::solve implements x = R^{-1} y
// and transposeSolve implements x = R^{-T} y, by comparing against
// GaussianBayesNet::backSubstitute / backSubstituteTranspose on three graphs
// (a planar graph and two graphs deserialized from example XML files).
TEST(SubgraphSolver, Solves) {
// Create preconditioner
SubgraphPreconditioner system;
// We test on three different graphs
const auto Ab1 = planarGraph(3).get<0>();
const auto Ab2 = read("toy3D");
const auto Ab3 = read("randomGrid3D");
// For all graphs, test solve and solveTranspose
for (const auto& Ab : {Ab1, Ab2, Ab3}) {
// Call build, a non-const method needed to make solve work :-(
KeyInfo keyInfo(Ab);
std::map<Key, Vector> lambda;
system.build(Ab, keyInfo, lambda);
// Create a perturbed (non-zero) RHS
const auto xbar = system.Rc1()->optimize(); // merely for use in zero below
auto values_y = VectorValues::Zero(xbar);
// Perturb the first two blocks to +/-100 so the RHS is non-trivial.
auto it = values_y.begin();
it->second.setConstant(100);
++it;
it->second.setConstant(-100);
// Solve the VectorValues way
auto values_x = system.Rc1()->backSubstitute(values_y);
// Solve the matrix way, this really just checks BN::backSubstitute
// This only works with Rc1 ordering, not with keyInfo !
// TODO(frank): why does this not work with an arbitrary ordering?
const auto ord = system.Rc1()->ordering();
const Matrix R1 = system.Rc1()->matrix(ord).first;
auto ord_y = values_y.vector(ord);
auto vector_x = R1.inverse() * ord_y;
EXPECT(assert_equal(vector_x, values_x.vector(ord)));
// Test that 'solve' does implement x = R^{-1} y
// We do this by asserting it gives same answer as backSubstitute
// Only works with keyInfo ordering:
const auto ordering = keyInfo.ordering();
auto vector_y = values_y.vector(ordering);
const size_t N = R1.cols();
Vector solve_x = Vector::Zero(N);
system.solve(vector_y, solve_x);
EXPECT(assert_equal(values_x.vector(ordering), solve_x));
// Test that transposeSolve does implement x = R^{-T} y
// We do this by asserting it gives same answer as backSubstituteTranspose
auto values_x2 = system.Rc1()->backSubstituteTranspose(values_y);
Vector solveT_x = Vector::Zero(N);
system.transposeSolve(vector_y, solveT_x);
EXPECT(assert_equal(values_x2.vector(ordering), solveT_x));
}
}
/* ************************************************************************* */
TEST(SubgraphPreconditioner, conjugateGradients) {
// Build a planar graph // Build a planar graph
GaussianFactorGraph Ab; GaussianFactorGraph Ab;
VectorValues xtrue; VectorValues xtrue;
size_t N = 3; size_t N = 3;
boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b
// Get the spanning tree and corresponding ordering // Get the spanning tree
GaussianFactorGraph Ab1_, Ab2_; // A1*x-b1 and A2*x-b2 GaussianFactorGraph::shared_ptr Ab1, Ab2; // A1*x-b1 and A2*x-b2
boost::tie(Ab1_, Ab2_) = splitOffPlanarTree(N, Ab); boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab);
SubgraphPreconditioner::sharedFG Ab1(new GaussianFactorGraph(Ab1_));
SubgraphPreconditioner::sharedFG Ab2(new GaussianFactorGraph(Ab2_));
// Eliminate the spanning tree to build a prior // Eliminate the spanning tree to build a prior
Ordering ordering = planarOrdering(N); SubgraphPreconditioner::sharedBayesNet Rc1 =
SubgraphPreconditioner::sharedBayesNet Rc1 = GaussianSequentialSolver(Ab1_).eliminate(); // R1*x-c1 Ab1->eliminateSequential(); // R1*x-c1
VectorValues xbar = optimize(*Rc1); // xbar = inv(R1)*c1 VectorValues xbar = Rc1->optimize(); // xbar = inv(R1)*c1
// Create Subgraph-preconditioned system // Create Subgraph-preconditioned system
VectorValues::shared_ptr xbarShared(new VectorValues(xbar)); // TODO: horrible VectorValues::shared_ptr xbarShared(
new VectorValues(xbar)); // TODO: horrible
SubgraphPreconditioner system(Ab2, Rc1, xbarShared); SubgraphPreconditioner system(Ab2, Rc1, xbarShared);
// Create zero config y0 and perturbed config y1 // Create zero config y0 and perturbed config y1
VectorValues y0 = VectorValues::Zero(xbar); VectorValues y0 = VectorValues::Zero(xbar);
VectorValues y1 = y0; VectorValues y1 = y0;
y1[1] = Vector2(1.0, -1.0); y1[key(2, 2)] = Vector2(1.0, -1.0);
VectorValues x1 = system.x(y1); VectorValues x1 = system.x(y1);
// Solve for the remaining constraints using PCG // Solve for the remaining constraints using PCG
ConjugateGradientParameters parameters; ConjugateGradientParameters parameters;
VectorValues actual = conjugateGradients<SubgraphPreconditioner, VectorValues actual = conjugateGradients<SubgraphPreconditioner,
VectorValues, Errors>(system, y1, parameters); VectorValues, Errors>(system, y1, parameters);
CHECK(assert_equal(y0,actual)); EXPECT(assert_equal(y0,actual));
// Compare with non preconditioned version: // Compare with non preconditioned version:
VectorValues actual2 = conjugateGradientDescent(Ab, x1, parameters); VectorValues actual2 = conjugateGradientDescent(Ab, x1, parameters);
CHECK(assert_equal(xtrue,actual2,1e-4)); EXPECT(assert_equal(xtrue, actual2, 1e-4));
} }
#endif
/* ************************************************************************* */ /* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr); } int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */ /* ************************************************************************* */