Pose2SLAMOptimizer unit tests worked

release/4.3a0
Kai Ni 2010-01-23 03:49:05 +00:00
parent 21c1af2b9f
commit b092fee64b
9 changed files with 672 additions and 597 deletions

.cproject (1117 changes)

File diff suppressed because it is too large.

View File

@@ -23,7 +23,7 @@
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildArguments</key>
<value></value>
<value>-j2</value>
</dictionary>
<dictionary>
<key>org.eclipse.cdt.make.core.buildCommand</key>

View File

@@ -198,8 +198,8 @@ Errors GaussianFactorGraph::rhs() const {
/* ************************************************************************* */
Vector GaussianFactorGraph::rhsVector() const {
Vector v;
return v;
Errors e = rhs();
return concatVectors(e);
}
/* ************************************************************************* */
@@ -217,6 +217,24 @@ pair<Matrix,Vector> GaussianFactorGraph::matrix(const Ordering& ordering) const
return lf.matrix(ordering);
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::assembleConfig(const Vector& vs, const Ordering& ordering) const {
Dimensions dims = dimensions();
VectorConfig config;
Vector::const_iterator itSrc = vs.begin();
Vector::iterator itDst;
BOOST_FOREACH(const Symbol& key, ordering){
int dim = dims.find(key)->second;
Vector v(dim);
for (itDst=v.begin(); itDst!=v.end(); itDst++, itSrc++)
*itDst = *itSrc;
config.insert(key, v);
}
if (itSrc != vs.end())
throw runtime_error("assembleConfig: input vector and ordering are not compatible with the graph");
return config;
}
/* ************************************************************************* */
Dimensions GaussianFactorGraph::columnIndices(const Ordering& ordering) const {
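
For readers skimming the hunk above: the new assembleConfig walks the given ordering, looks up each key's dimension, and copies consecutive slices of the stacked source vector into a per-key VectorConfig, throwing if the lengths do not match. A small self-contained sketch of the same slicing idea follows, using only the standard library; the string keys and std::map/std::vector types are illustrative stand-ins for gtsam's Symbol, Vector and VectorConfig, not the real API.

#include <map>
#include <stdexcept>
#include <string>
#include <vector>

// Illustrative stand-in: a "config" maps a key to its block of values.
typedef std::map<std::string, std::vector<double> > Config;

// Split a stacked vector into per-key blocks, following 'ordering'.
Config assembleConfigSketch(const std::vector<double>& stacked,
                            const std::vector<std::string>& ordering,
                            const std::map<std::string, int>& dims) {
  Config config;
  std::size_t src = 0;
  for (std::size_t k = 0; k < ordering.size(); ++k) {
    const std::string& key = ordering[k];
    const int dim = dims.find(key)->second;  // dimension of this variable
    if (src + dim > stacked.size())
      throw std::runtime_error("assembleConfigSketch: vector too short for ordering");
    config[key] = std::vector<double>(stacked.begin() + src,
                                      stacked.begin() + src + dim);
    src += dim;
  }
  if (src != stacked.size())  // leftover entries: vector and ordering disagree
    throw std::runtime_error("assembleConfigSketch: vector and ordering are not compatible");
  return config;
}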

View File

@@ -175,6 +175,13 @@ namespace gtsam {
*/
std::pair<Matrix,Vector> matrix (const Ordering& ordering) const;
/**
* split the source vector w.r.t. the given ordering and assemble a vector config
* @param v: the source vector
* @param ordering: the ordering corresponding to the vector
*/
VectorConfig assembleConfig(const Vector& v, const Ordering& ordering) const;
/**
* get the starting column indices for all variables
* @param ordering of variables needed for matrix column order

View File

@@ -37,8 +37,9 @@ namespace gtsam {
/* ************************************************************************* */
void Pose2SLAMOptimizer::update(const Vector& x) {
VectorConfig X; // TODO
update(X);
VectorConfig X = system_->assembleConfig(x, *solver_.ordering());
*theta_ = expmap(*theta_, X);
linearize();
}
/* ************************************************************************* */
@@ -47,5 +48,17 @@ namespace gtsam {
update(x);
}
/* ************************************************************************* */
/* ************************************************************************* */
Vector Pose2SLAMOptimizer::optimize() const {
VectorConfig X = solver_.optimize(*system_);
std::list<Vector> vs;
BOOST_FOREACH(const Symbol& key, *solver_.ordering())
vs.push_back(X[key]);
return concatVectors(vs);
}
/* ************************************************************************* */
double Pose2SLAMOptimizer::error() const {
return graph_->error(*theta_);
}
}
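
Taken together with the header changes below, update() and optimize() are meant to be driven from an outer nonlinear loop, exactly as the commented-out optimize2 test at the end of this commit sketches: linearize around the current estimate, solve the linear system for a flat delta vector, then apply it. A hedged sketch of that loop against the Pose2SLAMOptimizer interface declared in this diff (the include path and iteration count are assumptions):

#include "Pose2SLAMOptimizer.h"  // assumed include path

using namespace gtsam;

// One outer pass of the solve-and-update loop.
void iterate(Pose2SLAMOptimizer& optimizer, int iterations = 10) {
  for (int i = 0; i < iterations; ++i) {
    optimizer.linearize();            // rebuild the linear system around the current theta
    Vector x = optimizer.optimize();  // flat solution, blocks stacked in ordering order
    optimizer.update(x);              // assembleConfig + expmap + relinearize
  }
  // optimizer.error() reports the nonlinear error at the resulting estimate.
}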

View File

@@ -8,6 +8,8 @@
#ifndef POSE2SLAMOPTIMIZER_H_
#define POSE2SLAMOPTIMIZER_H_
#include <boost/foreach.hpp>
#include "pose2SLAM.h"
#include "Ordering.h"
#include "SubgraphPreconditioner.h"
@@ -54,12 +56,14 @@ namespace gtsam {
boost::shared_ptr<const Pose2Graph> graph() const {
return graph_;
}
/**
* linearize around current theta
* return the current linearization point
*/
boost::shared_ptr<const Pose2Config> theta() const {
return theta_;
}
/**
* linearize around current theta
*/
@@ -67,31 +71,22 @@ namespace gtsam {
system_ = solver_.linearize(*graph_, *theta_);
}
/**
* update estimate with pure delta config x
*/
void update(const VectorConfig& x) {
// TODO instead of assigning can we create a new one and replace the shared ptr ?
*theta_ = expmap(*theta_, x);
linearize();
}
/**
* Optimize to get a solution vector
*/
Vector optimize() {
VectorConfig X = solver_.optimize(*system_);
Vector x; // TODO convert to Vector
return x;
}
Vector optimize() const;
double error() const;
/**
* Return matrices associated with optimization problem
* around current non-linear estimate theta
* Returns [IJS] sparse representation
*/
Matrix Ab1() const { return system_->Ab1(*solver_.ordering()); }
Matrix Ab2() const { return system_->Ab2(*solver_.ordering()); }
Matrix a1() const { return system_->A1(*solver_.ordering()); }
Matrix a2() const { return system_->A2(*solver_.ordering()); }
Vector b1() const { return system_->b1(); }
Vector b2() const { return system_->b2(); }
/**
* update estimate with pure delta config x

View File

@@ -44,8 +44,11 @@ namespace gtsam {
*/
SubgraphPreconditioner(sharedFG& Ab1, sharedFG& Ab2, sharedBayesNet& Rc1, sharedConfig& xbar);
Matrix Ab1(const Ordering& ordering) const { return Ab1_->sparse(ordering); }
Matrix Ab2(const Ordering& ordering) const { return Ab2_->sparse(ordering); }
Matrix A1(const Ordering& ordering) const { return Ab1_->sparse(ordering); }
Matrix A2(const Ordering& ordering) const { return Ab2_->sparse(ordering); }
Vector b1() const { return Ab1_->rhsVector(); }
Vector b2() const { return Ab2_->rhsVector(); }
VectorConfig assembleConfig(const Vector& v, const Ordering& ordering) const { return Ab1_->assembleConfig(v, ordering); }
/* x = xbar + inv(R1)*y */
VectorConfig x(const VectorConfig& y) const;

View File

@@ -1,8 +1,12 @@
class Pose2SLAMOptimizer {
Pose2SLAMOptimizer(string dataset_name);
Matrix Ab1() const;
Matrix Ab2() const;
void update(Vector x) const;
Vector optimize() const;
double error() const;
Matrix a1() const;
Matrix a2() const;
Vector b1() const;
Vector b2() const;
};
class SharedGaussian {

View File

@@ -162,25 +162,53 @@ TEST(Pose2Graph, optimizeCircle) {
// Check loop closure
CHECK(assert_equal(delta,between(actual[5],actual[0])));
// Try PCG class
// Pose2SLAMOptimizer myOptimizer("3");
// Matrix Ab1 = myOptimizer.Ab1();
// CHECK(assert_equal(Matrix_(1,1,1.0),Ab1));
// myOptimizer.linearize();
//
// Matrix A1 = myOptimizer.a1();
// LONGS_EQUAL(3, A1.size1());
// LONGS_EQUAL(17, A1.size2()); // 7 + 7 + 3
//
// Matrix A2 = myOptimizer.a2();
// LONGS_EQUAL(3, A1.size1());
// LONGS_EQUAL(7, A2.size2()); // 7
//
// Vector b1 = myOptimizer.b1();
// LONGS_EQUAL(9, b1.size()); // 3 + 3 + 3
//
// Vector b2 = myOptimizer.b2();
// LONGS_EQUAL(3, b2.size()); // 3
//
// // Here, call matlab to
// // A=[A1;A2], b=[b1;b2]
// // R=qr(A1)
// // call pcg on A,b, with preconditioner R -> get x
//
// Vector x = myOptimizer.optimize();
// LONGS_EQUAL(9, x.size()); // 3 + 3 + 3
//
// Matrix Ab2 = myOptimizer.Ab2();
// CHECK(assert_equal(Matrix_(1,1,1.0),Ab2));
// Here, call matlab to
// A=[A1;A2], b=[b1;b2]
// R=qr(A1)
// call pcg on A,b, with preconditioner R -> get x
// Vector x;
// myOptimizer.update(x);
//
// Pose2Config expected;
// expected.insert(0, Pose2(0.,0.,0.));
// expected.insert(1, Pose2(1.,0.,0.));
// expected.insert(2, Pose2(2.,0.,0.));
//
// // Check with ground truth
// CHECK(assert_equal(expected, *myOptimizer.theta()));
}
// Check with ground truth
// CHECK(assert_equal(hexagon, *myOptimizer.theta()));
TEST(Pose2Graph, optimize2) {
// Pose2SLAMOptimizer myOptimizer("100");
//
// //cout << "error: " << myOptimizer.error() << endl;
// for(int i = 0; i<10; i++) {
// myOptimizer.linearize();
// Vector x = myOptimizer.optimize();
// myOptimizer.update(x);
// }
// //cout << "error: " << myOptimizer.error() << endl;
// CHECK(myOptimizer.error() < 1.);
}
/* ************************************************************************* */