debug hard constraint and make pose2prior work

parent fb3e38b161
commit f5a6a10feb
@@ -91,7 +91,12 @@ namespace gtsam {
 		if (verbosity >= CONFIG)
 			newConfig->print("newConfig");

-		return NonlinearOptimizer(graph_, ordering_, newConfig);
+		NonlinearOptimizer newOptimizer = NonlinearOptimizer(graph_, ordering_, newConfig);
+
+		if (verbosity >= ERROR)
+			cout << "error: " << newOptimizer.error_ << endl;
+
+		return newOptimizer;
 	}

 /* ************************************************************************* */
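Note on the hunk above: binding the rebuilt optimizer to a local, instead of returning the temporary directly, is what makes the freshly computed error available for the verbosity-gated report. A minimal standalone sketch of that pattern (the Optimizer struct, error_ field, and Verbosity enum below are stand-ins, not the actual gtsam declarations):

#include <iostream>

enum Verbosity { SILENT, ERROR, CONFIG };

struct Optimizer {
	double error_;  // total graph error at the current config
	explicit Optimizer(double e) : error_(e) {}
};

Optimizer iterate(Verbosity verbosity) {
	// Construction recomputes the error for the new config.
	Optimizer newOptimizer(0.5);

	// The local binding lets us report the new error before returning.
	if (verbosity >= ERROR)
		std::cout << "error: " << newOptimizer.error_ << std::endl;

	return newOptimizer;
}

int main() { iterate(ERROR); return 0; }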
@@ -9,23 +9,25 @@
 #include "GaussianFactor.h"
 #include "Pose2.h"
 #include "Matrix.h"
+#include "Pose2Config.h"
+#include "Key.h"
 #include "Pose2Graph.h"
 #include "ostream"

 namespace gtsam {

 class Pose2Prior : public NonlinearFactor<Pose2Config> {
 private:
-	std::string key_;
+	typedef Pose2Config::Key Key;
+	Key key_;
 	Pose2 measured_;
 	Matrix square_root_inverse_covariance_; /** sqrt(inv(measurement_covariance)) */
-	std::list<std::string> keys_;
+	//std::list<Key> keys_;

 public:

 	typedef boost::shared_ptr<Pose2Prior> shared_ptr; // shorthand for a smart pointer to a factor

-	Pose2Prior(const std::string& key, const Pose2& measured, const Matrix& measurement_covariance) :
+	Pose2Prior(const Key& key, const Pose2& measured, const Matrix& measurement_covariance) :
 		key_(key),measured_(measured) {
 		square_root_inverse_covariance_ = inverse_square_root(measurement_covariance);
-		keys_.push_back(key);
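The refactor above replaces free-form string keys with the typed Pose2Config::Key. For printing to keep working, the key type must still convert to a string, which is why the next hunk casts with (std::string)key_. A toy sketch of such a key (hypothetical illustration, not the real type from the newly included Key.h):

#include <sstream>
#include <string>

// Toy typed key: a fixed 'x' tag plus an index, printing as x1, x2, ...
class PoseKey {
	size_t j_;
public:
	explicit PoseKey(size_t j) : j_(j) {}
	size_t index() const { return j_; }
	operator std::string() const {  // enables (std::string)key_
		std::ostringstream oss;
		oss << 'x' << j_;
		return oss.str();
	}
};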
@@ -35,7 +37,7 @@ public:
 	void print(const std::string& name) const {
-		std::cout << name << std::endl;
+		std::cout << "Factor "<< std::endl;
-		std::cout << "key "<< key_<<std::endl;
+		std::cout << "key "<< (std::string)key_<<std::endl;
 		measured_.print("measured ");
 		gtsam::print(square_root_inverse_covariance_,"MeasurementCovariance");
 	}

@@ -43,16 +45,16 @@ public:

 	/** implement functions needed to derive from Factor */
 	Vector error_vector(const Pose2Config& config) const {
-		Pose2 p = config.get(key_);
+		Pose2 p = config[key_];
 		return -logmap(p,measured_);
 	}

-	std::list<std::string> keys() const { return keys_; }
+	//std::list<Key> keys() const { return keys_; }
 	std::size_t size() const { return keys_.size(); }

 	/** linearize */
 	boost::shared_ptr<GaussianFactor> linearize(const Pose2Config& config) const {
-		Pose2 p = config.get(key_);
+		Pose2 p = config[key_];
 		Vector b = logmap(p,measured_);
 		Matrix H(3,3);
 		H(0,0)=1; H(0,1)=0; H(0,2)=0;

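For context, the linearize() above follows the standard Gauss-Newton treatment of a prior factor. A sketch of the math in the commit's own variable names (the first-order expansion is the assumed model, not spelled out in the code):

e(p) = -\operatorname{logmap}(p,\ \text{measured}), \qquad b = \operatorname{logmap}(p,\ \text{measured}) = -e(p)

\min_{\delta}\ \lVert R\,(H\,\delta - b) \rVert^2, \qquad R = \Sigma^{-1/2}\ (\text{square\_root\_inverse\_covariance\_}), \quad H \approx I_{3\times 3}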
@@ -195,6 +195,11 @@ namespace gtsam {
 	return result;
 }

+/* ************************************************************************* */
+double max(const Vector &a) {
+	return *(std::max_element(a.begin(), a.end()));
+}
+
 /* ************************************************************************* */
 pair<double, Vector > house(Vector &x)
 {
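One caveat on the new max(): std::max_element returns an iterator (hence the dereference), and on an empty vector that iterator equals end(), so dereferencing it is undefined behavior; callers are implicitly assumed to pass non-empty vectors. A small standalone check of the idiom:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
	std::vector<double> a;
	a.push_back(-0.1); a.push_back(0.3); a.push_back(0.2);

	assert(!a.empty());  // *max_element on an empty range is undefined
	double m = *(std::max_element(a.begin(), a.end()));
	assert(m == 0.3);
	return 0;
}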
@@ -170,6 +170,13 @@ Vector ediv_(const Vector &a, const Vector &b);
 */
double sum(const Vector &a);

+/**
+ * return the max element of a vector
+ * @param a vector
+ * @return max(a)
+ */
+double max(const Vector &a);
+
/** Dot product */
inline double dot(const Vector &a, const Vector& b) { return sum(emul(a,b)); }
@@ -68,6 +68,14 @@ namespace gtsam {
 		/** Total dimensionality */
 		size_t dim() const;

+		/** max of the vectors */
+		inline double max() const {
+			double m = -std::numeric_limits<double>::infinity();
+			for(const_iterator it=begin(); it!=end(); it++)
+				m = std::max(m, gtsam::max(it->second));
+			return m;
+		}
+
 		/** Scale */
 		VectorConfig scale(double s) const;
 		VectorConfig operator*(double s) const;
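VectorConfig::max folds the per-vector max over every entry in the config, starting from -infinity so that any finite value wins. The same pattern on a plain std::map (a self-contained sketch; that VectorConfig wraps something map-like is an assumption here):

#include <algorithm>
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <vector>

typedef std::map<std::string, std::vector<double> > Config;

double maxOf(const Config& cfg) {
	double m = -std::numeric_limits<double>::infinity();
	for (Config::const_iterator it = cfg.begin(); it != cfg.end(); ++it)
		for (size_t i = 0; i < it->second.size(); ++i)
			m = std::max(m, it->second[i]);
	return m;  // stays -inf if the config is empty
}

int main() {
	Config cfg;
	cfg["x1"] = std::vector<double>(3, -0.1);
	cfg["x2"] = std::vector<double>(3, 0.2);
	std::cout << maxOf(cfg) << std::endl;  // prints 0.2
	return 0;
}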
@@ -125,6 +133,9 @@ namespace gtsam {
 	/** dim function (for iterative::CGD) */
 	inline double dim(const VectorConfig& x) { return x.dim();}

+	/** max of the vectors */
+	inline double max(const VectorConfig& x) { return x.max();}
+
 	/** print with optional string */
 	void print(const VectorConfig& v, const std::string& s = "");
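These one-line wrappers matter because the solver is a template: inside conjugateGradients (next hunk), calls like dim(x) and max(x) are unqualified, so they resolve to whatever free overloads exist for the concrete V at instantiation time. A minimal illustration of the mechanism with a toy type (not the gtsam classes):

#include <cstdio>

struct Config { int n; };

// Free-function wrapper, analogous to dim(const VectorConfig&)
inline double dim(const Config& x) { return x.n; }

// Generic algorithm: only requires that dim(x) resolves for V
template<class V>
void report(const V& x) {
	std::printf("dim: %g\n", dim(x));
}

int main() {
	Config c = { 6 };
	report(c);  // the template picks up dim(const Config&)
	return 0;
}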
@@ -22,15 +22,17 @@ namespace gtsam {
 	template<class S, class V, class E>
 	V conjugateGradients(const S& Ab, V x, bool verbose, double epsilon, double epsilon_abs,
 			size_t maxIterations, bool steepest = false) {

 		//GTSAM_PRINT(Ab);
 		if (maxIterations == 0) maxIterations = dim(x) * (steepest ? 10 : 1);
 		size_t reset = (size_t)(sqrt(dim(x))+0.5); // when to reset

 		// Start with g0 = A'*(A*x0-b), d0 = - g0
 		// i.e., first step is in direction of negative gradient
 		V g = Ab.gradient(x);
 		//print(g, "g");
 		V d = -g;
 		double dotg0 = dot(g, g), prev_dotg = dotg0;
+		//printf("dotg0:%g epsilon_abs:%g\n", dotg0, epsilon_abs);
+		if (dotg0 < epsilon_abs) return x;
 		double threshold = epsilon * epsilon * dotg0;

@@ -43,7 +45,9 @@ namespace gtsam {

 			// calculate optimal step-size
 			E Ad = Ab * d;
+			//printf("dot(d, g):%g dot(Ad, Ad):%g\n", dot(d, g), dot(Ad, Ad));
 			double alpha = -dot(d, g) / dot(Ad, Ad);
+			//printf("alpha:%g\n", alpha);

 			// do step in new search direction
 			x = x + alpha * d;
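For reference, here is the algorithm the template implements, stripped to a dense 2x2 system. The gtsam version works on least-squares gradients g = A'(Ax-b) over factor graphs; this standalone sketch is the classical conjugate-gradient loop for a symmetric positive-definite A x = b, showing the same step-size and direction-update structure:

#include <cstdio>
#include <vector>

typedef std::vector<double> Vec;

double dot(const Vec& a, const Vec& b) {
	double s = 0.0;
	for (size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
	return s;
}

// Conjugate gradients for A x = b with A symmetric positive-definite.
Vec conjugateGradients(const double A[2][2], const Vec& b, Vec x,
		double epsilon, size_t maxIterations) {
	Vec r(2), d(2), Ad(2);
	// r = b - A*x; initial search direction is the residual
	for (int i = 0; i < 2; ++i)
		r[i] = b[i] - (A[i][0]*x[0] + A[i][1]*x[1]);
	d = r;
	double rr = dot(r, r);
	for (size_t k = 0; k < maxIterations && rr > epsilon * epsilon; ++k) {
		for (int i = 0; i < 2; ++i) Ad[i] = A[i][0]*d[0] + A[i][1]*d[1];
		double alpha = rr / dot(d, Ad);   // optimal step size along d
		for (int i = 0; i < 2; ++i) x[i] += alpha * d[i];
		for (int i = 0; i < 2; ++i) r[i] -= alpha * Ad[i];
		double rrNew = dot(r, r);
		double beta = rrNew / rr;         // mix new gradient with old direction
		for (int i = 0; i < 2; ++i) d[i] = r[i] + beta * d[i];
		rr = rrNew;
	}
	return x;
}

int main() {
	double A[2][2] = { {4, 1}, {1, 3} };
	Vec b(2); b[0] = 1; b[1] = 2;
	Vec x0(2, 0.0);
	Vec x = conjugateGradients(A, b, x0, 1e-9, 100);
	std::printf("x = (%g, %g)\n", x[0], x[1]);  // approx (0.0909, 0.6364)
	return 0;
}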
@@ -12,63 +12,119 @@ using namespace boost::assign;
 #include "Ordering.h"
 #include "iterative.h"
 #include "smallExample.h"
+#include "Pose2Graph.h"
+#include "Pose2Prior.h"

 using namespace std;
 using namespace gtsam;

-/* ************************************************************************* */
-TEST( Iterative, steepestDescent )
-{
-	// Expected solution
-	Ordering ord;
-	ord += "l1", "x1", "x2";
-	GaussianFactorGraph fg = createGaussianFactorGraph();
-	VectorConfig expected = fg.optimize(ord); // destructive
+///* ************************************************************************* */
+//TEST( Iterative, steepestDescent )
+//{
+//	// Expected solution
+//	Ordering ord;
+//	ord += "l1", "x1", "x2";
+//	GaussianFactorGraph fg = createGaussianFactorGraph();
+//	VectorConfig expected = fg.optimize(ord); // destructive
+//
+//	// Do gradient descent
+//	GaussianFactorGraph fg2 = createGaussianFactorGraph();
+//	VectorConfig zero = createZeroDelta();
+//	bool verbose = false;
+//	VectorConfig actual = steepestDescent(fg2, zero, verbose);
+//	CHECK(assert_equal(expected,actual,1e-2));
+//}
+//
+///* ************************************************************************* */
+//TEST( Iterative, conjugateGradientDescent )
+//{
+//	// Expected solution
+//	Ordering ord;
+//	ord += "l1", "x1", "x2";
+//	GaussianFactorGraph fg = createGaussianFactorGraph();
+//	VectorConfig expected = fg.optimize(ord); // destructive
+//
+//	// create graph and get matrices
+//	GaussianFactorGraph fg2 = createGaussianFactorGraph();
+//	Matrix A;
+//	Vector b;
+//	Vector x0 = gtsam::zero(6);
+//	boost::tie(A, b) = fg2.matrix(ord);
+//	Vector expectedX = Vector_(6, -0.1, 0.1, -0.1, -0.1, 0.1, -0.2);
+//
+//	// Do conjugate gradient descent, System version
+//	System Ab(A, b);
+//	Vector actualX = conjugateGradientDescent(Ab, x0);
+//	CHECK(assert_equal(expectedX,actualX,1e-9));
+//
+//	// Do conjugate gradient descent, Matrix version
+//	Vector actualX2 = conjugateGradientDescent(A, b, x0);
+//	CHECK(assert_equal(expectedX,actualX2,1e-9));
+//
+//	// Do conjugate gradient descent on factor graph
+//	VectorConfig zero = createZeroDelta();
+//	VectorConfig actual = conjugateGradientDescent(fg2, zero);
+//	CHECK(assert_equal(expected,actual,1e-2));
+//
+//	// Test method
+//	VectorConfig actual2 = fg2.conjugateGradientDescent(zero);
+//	CHECK(assert_equal(expected,actual2,1e-2));
+//}

-	// Do gradient descent
-	GaussianFactorGraph fg2 = createGaussianFactorGraph();
-	VectorConfig zero = createZeroDelta();
-	bool verbose = false;
-	VectorConfig actual = steepestDescent(fg2, zero, verbose);
-	CHECK(assert_equal(expected,actual,1e-2));
+/* ************************************************************************* *
+TEST( Iterative, conjugateGradientDescent_hard_constraint )
+{
+	typedef Pose2Config::Key Key;
+
+	Pose2Config config;
+	config.insert(1, Pose2(0.,0.,0.));
+	config.insert(2, Pose2(1.5,0.,0.));
+
+	Pose2Graph graph;
+	Matrix cov = eye(3);
+	graph.push_back(Pose2Graph::sharedFactor(new Pose2Factor(Key(1), Key(2), Pose2(1.,0.,0.), cov)));
+	graph.addConstraint(1, config[1]);
+
+	VectorConfig zeros;
+	zeros.insert("x1",zero(3));
+	zeros.insert("x2",zero(3));
+
+	GaussianFactorGraph fg = graph.linearize(config);
+	VectorConfig actual = conjugateGradientDescent(fg, zeros, true, 1e-3, 1e-5, 10);
+
+	VectorConfig expected;
+	expected.insert("x1", zero(3));
+	expected.insert("x2", Vector_(-0.5,0.,0.));
+	CHECK(assert_equal(expected, actual));
 }

 /* ************************************************************************* */
-TEST( Iterative, conjugateGradientDescent )
+TEST( Iterative, conjugateGradientDescent_soft_constraint )
 {
-	// Expected solution
-	Ordering ord;
-	ord += "l1", "x1", "x2";
-	GaussianFactorGraph fg = createGaussianFactorGraph();
-	VectorConfig expected = fg.optimize(ord); // destructive
+	typedef Pose2Config::Key Key;

-	// create graph and get matrices
-	GaussianFactorGraph fg2 = createGaussianFactorGraph();
-	Matrix A;
-	Vector b;
-	Vector x0 = gtsam::zero(6);
-	boost::tie(A, b) = fg2.matrix(ord);
-	Vector expectedX = Vector_(6, -0.1, 0.1, -0.1, -0.1, 0.1, -0.2);
+	Pose2Config config;
+	config.insert(1, Pose2(0.,0.,0.));
+	config.insert(2, Pose2(1.5,0.,0.));

-	// Do conjugate gradient descent, System version
-	System Ab(A, b);
-	Vector actualX = conjugateGradientDescent(Ab, x0);
-	CHECK(assert_equal(expectedX,actualX,1e-9));
+	Pose2Graph graph;
+	Matrix cov = eye(3);
+	Matrix cov2 = eye(3) * 1e-10;
+	graph.push_back(Pose2Graph::sharedFactor(new Pose2Factor(Key(1), Key(2), Pose2(1.,0.,0.), cov)));
+	graph.push_back(boost::shared_ptr<Pose2Prior>(new Pose2Prior(Key(1), Pose2(0.,0.,0.), cov2)));

-	// Do conjugate gradient descent, Matrix version
-	Vector actualX2 = conjugateGradientDescent(A, b, x0);
-	CHECK(assert_equal(expectedX,actualX2,1e-9));
+	VectorConfig zeros;
+	zeros.insert("x1",zero(3));
+	zeros.insert("x2",zero(3));

-	// Do conjugate gradient descent on factor graph
-	VectorConfig zero = createZeroDelta();
-	VectorConfig actual = conjugateGradientDescent(fg2, zero);
-	CHECK(assert_equal(expected,actual,1e-2));
+	GaussianFactorGraph fg = graph.linearize(config);
+	VectorConfig actual = conjugateGradientDescent(fg, zeros, false, 1e-3, 1e-5, 100);

-	// Test method
-	VectorConfig actual2 = fg2.conjugateGradientDescent(zero);
-	CHECK(assert_equal(expected,actual2,1e-2));
+	VectorConfig expected;
+	expected.insert("x1", zero(3));
+	expected.insert("x2", Vector_(3,-0.5,0.,0.));
+	CHECK(assert_equal(expected, actual));
 }

 /* ************************************************************************* */
 int main() {
 	TestResult tr;
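A quick sanity check of the soft-constraint test's expected values: the prior pins x1 at the origin with covariance 1e-10 (so tight it is effectively a hard constraint), the Pose2Factor wants x2 exactly 1.0 ahead of x1, and the initial config places x2 at 1.5. The linearized correction therefore leaves x1 alone and pulls x2 back by the residual:

\delta_{x_1} = (0,\ 0,\ 0), \qquad \delta_{x_2} = (1.0 - 1.5,\ 0,\ 0) = (-0.5,\ 0,\ 0)

which is exactly the Vector_(3,-0.5,0.,0.) the test inserts for x2.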