Gradient using new operator^ and errors method

release/4.3a0
Frank Dellaert 2009-12-27 12:13:31 +00:00
parent 7d1428de60
commit 266fc56dea
9 changed files with 196 additions and 138 deletions

View File

@ -175,6 +175,30 @@ void GaussianFactor::tally_separator(const string& key, set<string>& separator)
  }
}
/* ************************************************************************* */
Vector GaussianFactor::operator*(const VectorConfig& x) const {
  Vector Ax = zero(b_.size());
  if (empty()) return Ax;

  // Just iterate over all A matrices and multiply in correct config part
  string j; Matrix Aj;
  FOREACH_PAIR(j, Aj, As_)
    Ax += (Aj * x[j]);
  return ediv(Ax,sigmas_);
}

/* ************************************************************************* */
VectorConfig GaussianFactor::operator^(const Vector& e) const {
  Vector E = ediv(e,sigmas_);
  VectorConfig x;

  // Just iterate over all A matrices and insert Ai^e into VectorConfig
  string j; Matrix Aj;
  FOREACH_PAIR(j, Aj, As_)
    x.insert(j,Aj^E);
  return x;
}
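For orientation, here is a minimal usage sketch of the two operators added above (illustrative only, not part of the commit; it uses only the constructor and VectorConfig calls that appear elsewhere in this diff, and assumes GaussianFactor.h pulls in Matrix, Vector and VectorConfig). operator* returns the sigma-whitened product A*x, while operator^ applies each whitened A_j transposed to a vector and scatters the results into a VectorConfig keyed by variable:

// Hypothetical sketch, not committed code.
#include "GaussianFactor.h"

using namespace gtsam;

void operatorSketch() {
  Matrix I = eye(2);
  Vector b = Vector_(2, 0.2, -0.1);
  GaussianFactor factor("x1", -I, "x2", I, b, 0.1); // residual (x2 - x1 - b) / 0.1

  VectorConfig x;
  x.insert("x1", Vector_(2, 1.0, 2.0));
  x.insert("x2", Vector_(2, 3.0, 4.0));

  Vector Ax = factor * x;       // whitened prediction: (A1*x1 + A2*x2) ./ sigmas
  VectorConfig g = factor ^ Ax; // per key j: Aj' * (Ax ./ sigmas)
}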
/* ************************************************************************* */
pair<Matrix,Vector> GaussianFactor::matrix(const Ordering& ordering, bool weight) const {
@ -362,22 +386,6 @@ GaussianFactor::eliminate(const string& key) const
  return make_pair(conditional, factor);
}
/* ************************************************************************* */
void GaussianFactor::addGradientContribution(const VectorConfig& x,
    VectorConfig& g) const {
  // calculate the value of the factor
  Vector e = GaussianFactor::error_vector(x);
  Vector et = trans(e); // transpose

  // contribute to gradient for each connected variable
  string j; Matrix Aj;
  FOREACH_PAIR(j, Aj, As_) {
    Vector dj = trans(et*Aj);      // this factor's contribution to gradient on j
    Vector wdj = ediv(dj,sigmas_); // properly weight by sigmas
    g.add(j,wdj);
  }
}
/* ************************************************************************* */
// Creates a factor on step-size, given initial estimate and direction d, e.g.
// Factor |A1*x+A2*y-b|/sigma -> |A1*(x0+alpha*dx)+A2*(y0+alpha*dy)-b|/sigma
@ -401,18 +409,6 @@ GaussianFactor::shared_ptr GaussianFactor::alphaFactor(const VectorConfig& x,
  return factor;
}
/* ************************************************************************* */
Vector GaussianFactor::operator*(const VectorConfig& x) const {
  Vector Ax = zero(b_.size());
  if (empty()) return Ax;
  string j; Matrix Aj;
  FOREACH_PAIR(j, Aj, As_)
    Ax += (Aj * x[j]);
  return ediv(Ax,sigmas_);
}
/* ************************************************************************* */
namespace gtsam {

View File

@ -192,6 +192,16 @@ public:
  void tally_separator(const std::string& key,
      std::set<std::string>& separator) const;
  /**
   * Return A*x
   */
  Vector operator*(const VectorConfig& x) const;

  /**
   * Return A^e
   */
  VectorConfig operator^(const Vector& e) const;
  /**
   * Return (dense) matrix associated with factor
   * @param ordering of variables needed for matrix column order
@ -216,13 +226,6 @@ public:
  boost::tuple<std::list<int>, std::list<int>, std::list<double> >
    sparse(const Dimensions& columnIndices) const;
  /**
   * Add gradient contribution to gradient config g
   * @param x: config at which to evaluate gradient
   * @param g: I/O parameter, evolving gradient
   */
  void addGradientContribution(const VectorConfig& x, VectorConfig& g) const;
  /**
   * Create a GaussianFactor on one variable 'alpha' (step size), in direction d
   * @param x: starting point for search
@ -230,11 +233,6 @@ public:
   */
  shared_ptr alphaFactor(const VectorConfig& x, const VectorConfig& d) const;
  /**
   * Return A*x
   */
  Vector operator*(const VectorConfig& x) const;
  /* ************************************************************************* */
  // MUTABLE functions. FD:on the path to being eradicated
  /* ************************************************************************* */

View File

@ -40,13 +40,37 @@ double GaussianFactorGraph::error(const VectorConfig& x) const {
}
/* ************************************************************************* */
-Errors GaussianFactorGraph::operator*(const VectorConfig& x) const {
+Errors GaussianFactorGraph::errors(const VectorConfig& x) const {
  Errors e;
  BOOST_FOREACH(sharedFactor factor,factors_)
-    e.push_back((*factor)*x);
+    e.push_back(factor->error_vector(x));
  return e;
}
/* ************************************************************************* */
Errors GaussianFactorGraph::operator*(const VectorConfig& x) const {
  Errors e;
  BOOST_FOREACH(sharedFactor Ai,factors_)
    e.push_back((*Ai)*x);
  return e;
}

/* ************************************************************************* */
VectorConfig GaussianFactorGraph::operator^(const Errors& e) const {
  VectorConfig x;
  // For each factor add the gradient contribution
  size_t i=0;
  BOOST_FOREACH(sharedFactor Ai,factors_)
    x += (*Ai)^e[i++];
  return x;
}

/* ************************************************************************* */
VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const {
  const GaussianFactorGraph& A = *this;
  return A^errors(x);
}
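Written out, gradient composes the two methods above. Up to the sigma-whitening applied inside errors and operator^ (not visible in this hunk), it evaluates the usual least-squares gradient:

f(x) = \tfrac{1}{2}\,\lVert A\,x - b \rVert^{2},
\qquad
\nabla f(x) = A^{\top}\,(A\,x - b)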
/* ************************************************************************* */
set<string> GaussianFactorGraph::find_separator(const string& key) const
{
@ -236,15 +260,6 @@ Matrix GaussianFactorGraph::sparse(const Ordering& ordering) const {
  return ijs;
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const {
  VectorConfig g;
  // For each factor add the gradient contribution
  BOOST_FOREACH(sharedFactor factor,factors_)
    factor->addGradientContribution(x,g);
  return g;
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::optimalUpdate(const VectorConfig& x,
    const VectorConfig& d) const {

View File

@ -13,8 +13,8 @@
#pragma once
#include <boost/shared_ptr.hpp>
#include "Errors.h"
#include "FactorGraph.h" #include "FactorGraph.h"
#include "Errors.h"
#include "GaussianFactor.h" #include "GaussianFactor.h"
#include "GaussianBayesNet.h" // needed for MATLAB toolbox !! #include "GaussianBayesNet.h" // needed for MATLAB toolbox !!
@ -73,12 +73,25 @@ namespace gtsam {
      push_back(sharedFactor(new GaussianFactor(terms,b,sigma)));
    }
    /** return A*x-b */
    Errors errors(const VectorConfig& x) const;

    /** unnormalized error */
    double error(const VectorConfig& x) const;

    /** return A*x */
    Errors operator*(const VectorConfig& x) const;

    /** return A^e */
    VectorConfig operator^(const Errors& e) const;

    /**
     * Calculate Gradient of A^(A*x-b) for a given config
     * @param x: VectorConfig specifying where to calculate gradient
     * @return gradient, as a VectorConfig as well
     */
    VectorConfig gradient(const VectorConfig& x) const;
    /** Unnormalized probability. O(n) */
    double probPrime(const VectorConfig& c) const {
      return exp(-0.5 * error(c));
@ -164,13 +177,6 @@ namespace gtsam {
     */
    Matrix sparse(const Ordering& ordering) const;
    /**
     * Calculate Gradient of 0.5*|Ax-b| for a given config
     * @param x: VectorConfig specifying where to calculate gradient
     * @return gradient, as a VectorConfig as well
     */
    VectorConfig gradient(const VectorConfig& x) const;
    /**
     * Take an optimal step in direction d by calculating optimal step-size
     * @param x: starting point for search

View File

@ -69,21 +69,31 @@ VectorConfig VectorConfig::operator*(double s) const {
  return scale(s);
}
/* ************************************************************************* */
void VectorConfig::operator+=(const VectorConfig& b) {
  string j; Vector b_j;
  FOREACH_PAIR(j, b_j, b.values) {
    iterator it = values.find(j);
    if (it==values.end())
      insert(j, b_j);
    else
      it->second += b_j;
  }
}
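A small hypothetical snippet illustrating the in-place add semantics above: keys present on both sides are summed component-wise, and keys only present on the right-hand side are inserted.

void plusEqualsSketch() { // illustrative only, not committed code
  VectorConfig a, b;
  a.insert("x", Vector_(2, 1.0, 2.0));
  b.insert("x", Vector_(2, 0.5, 0.5));
  b.insert("y", Vector_(2, 3.0, 4.0));
  a += b; // a now holds x = [1.5, 2.5] and the newly inserted y = [3.0, 4.0]
}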
/* ************************************************************************* */
VectorConfig VectorConfig::operator+(const VectorConfig& b) const {
-  VectorConfig result;
-  string key; Vector v;
-  FOREACH_PAIR(key, v, values)
-    result.insert(key, v + b.get(key));
+  VectorConfig result = *this;
+  result += b;
  return result;
}
/* ************************************************************************* */
VectorConfig VectorConfig::operator-(const VectorConfig& b) const {
  VectorConfig result;
-  string key; Vector v;
-  FOREACH_PAIR(key, v, values)
-    result.insert(key, v - b.get(key));
+  string j; Vector v;
+  FOREACH_PAIR(j, v, values)
+    result.insert(j, v - b.get(j));
  return result;
}

View File

@ -30,6 +30,7 @@ namespace gtsam {
    VectorConfig() {}
    VectorConfig(const VectorConfig& cfg_in): values(cfg_in.values) {}
    VectorConfig(const std::string& j, const Vector& a) { add(j,a); }

    virtual ~VectorConfig() {}
@ -78,6 +79,9 @@ namespace gtsam {
    VectorConfig scale(double s) const;
    VectorConfig operator*(double s) const;
    /** Add in place */
    void operator+=(const VectorConfig &b);

    /** Add */
    VectorConfig operator+(const VectorConfig &b) const;

View File

@ -26,20 +26,10 @@ using namespace gtsam;
/* ************************************************************************* */
TEST( GaussianFactor, linearFactor )
{
+  Matrix I = eye(2);
+  Vector b = Vector_(2,0.2,-0.1);
  double sigma = 0.1;
+  GaussianFactor expected("x1", -I, "x2", I, b, sigma);
-  Matrix A1(2,2);
-  A1(0,0) = -1.0 ; A1(0,1) = 0.0;
-  A1(1,0) = 0.0 ; A1(1,1) = -1.0;
-  Matrix A2(2,2);
-  A2(0,0) = 1.0 ; A2(0,1) = 0.0;
-  A2(1,0) = 0.0 ; A2(1,1) = 1.0;
-  Vector b(2);
-  b(0) = 0.2 ; b(1) = -0.1;
-  GaussianFactor expected("x1", A1, "x2", A2, b, sigma);
  // create a small linear factor graph
  GaussianFactorGraph fg = createGaussianFactorGraph();
@ -51,6 +41,29 @@ TEST( GaussianFactor, linearFactor )
  CHECK(assert_equal(expected,*lf));
}
/* ************************************************************************* */
TEST( GaussianFactor, operators )
{
  Matrix I = eye(2);
  Vector b = Vector_(2,0.2,-0.1);
  double sigma = 0.1;
  GaussianFactor lf("x1", -I, "x2", I, b, sigma);
  VectorConfig c;
  c.insert("x1",Vector_(2,10.,20.));
  c.insert("x2",Vector_(2,30.,60.));

  // test A*x
  Vector expectedE = Vector_(2,200.,400.), e = lf*c;
  CHECK(assert_equal(expectedE,e));

  // test A^e
  VectorConfig expectedX;
  expectedX.insert("x1",Vector_(2,-2000.,-4000.));
  expectedX.insert("x2",Vector_(2, 2000., 4000.));
  CHECK(assert_equal(expectedX,lf^e));
}
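As a worked check of the expected values above (not part of the test itself): with A1 = -I, A2 = I and sigma = 0.1,

A x = \frac{-I\,x_1 + I\,x_2}{\sigma} = \frac{[30,60] - [10,20]}{0.1} = [200,\;400]

and, since operator^ first whitens its argument, e/\sigma = [2000,\;4000], so

x_1 \mapsto (-I)^{\top} [2000,\;4000] = [-2000,\;-4000], \qquad
x_2 \mapsto I^{\top} [2000,\;4000] = [2000,\;4000]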
/* ************************************************************************* */
TEST( GaussianFactor, keys )
{
@ -242,7 +255,7 @@ TEST( GaussianFactor, linearFactorN){
    1.0, 0.0,
    0.0, 1.0,
    -10.0, 0.0,
    0.0,-10.0,
    0.0, 0.0,
    0.0, 0.0,
    0.0, 0.0,
@ -253,7 +266,7 @@ TEST( GaussianFactor, linearFactorN){
    10.0, 0.0,
    0.0, 10.0,
    -10.0, 0.0,
    0.0,-10.0,
    0.0, 0.0,
    0.0, 0.0)));
  combinedMeasurement.push_back(make_pair("x3", Matrix_(8,2,
@ -264,7 +277,7 @@ TEST( GaussianFactor, linearFactorN){
    10.0, 0.0,
    0.0, 10.0,
    -10.0, 0.0,
    0.0,-10.0)));
  combinedMeasurement.push_back(make_pair("x4", Matrix_(8,2,
    0.0, 0.0,
    0.0, 0.0,
@ -273,7 +286,7 @@ TEST( GaussianFactor, linearFactorN){
    0.0, 0.0,
    0.0, 0.0,
    10.0, 0.0,
    0.0,10.0)));
  Vector b = Vector_(8,
    10.0, 5.0, 1.0, -2.0, 1.5, -1.5, 2.0, -1.0);

View File

@ -95,18 +95,18 @@ TEST( GaussianFactorGraph, combine_factors_x1 )
  Matrix Ax1 = Matrix_(6,2,
    1., 0.,
    0., 1.,
    -1., 0.,
    0.,-1.,
    -1., 0.,
    0.,-1.
  );
  Matrix Ax2 = Matrix_(6,2,
    0., 0.,
    0., 0.,
    1., 0.,
    0., 1.,
    0., 0.,
    0., 0.
  );
@ -157,24 +157,24 @@ TEST( GaussianFactorGraph, combine_factors_x2 )
  Matrix Ax1 = Matrix_(4,2,
    // x1
    -1., 0., // f2
    0.,-1., // f2
    0., 0., // f4
    0., 0. // f4
  );
  Matrix Ax2 = Matrix_(4,2,
    // x2
    1., 0.,
    0., 1.,
    -1., 0.,
    0.,-1.
  );

  // the expected RHS vector
  Vector b(4);
  b(0) = 2*sigma1;
  b(1) = -1*sigma1;
  b(2) = -1 *sigma2;
  b(3) = 1.5*sigma2;

  vector<pair<string, Matrix> > meas;
@ -188,7 +188,6 @@ TEST( GaussianFactorGraph, combine_factors_x2 )
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x1 )
{
  GaussianFactorGraph fg = createGaussianFactorGraph();
@ -203,7 +202,6 @@ TEST( GaussianFactorGraph, eliminateOne_x1 )
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x2 )
{
  GaussianFactorGraph fg = createGaussianFactorGraph();
@ -360,7 +358,7 @@ TEST( GaussianFactorGraph, CONSTRUCTOR_GaussianBayesNet )
}
/* ************************************************************************* */
-TEST( GaussianFactorGraph, GET_ORDERING)
+TEST( GaussianFactorGraph, getOrdering)
{
  Ordering expected;
  expected += "l1","x1","x2";
@ -370,7 +368,7 @@ TEST( GaussianFactorGraph, GET_ORDERING)
}
/* ************************************************************************* */
-TEST( GaussianFactorGraph, OPTIMIZE )
+TEST( GaussianFactorGraph, optimize )
{
  // create a graph
  GaussianFactorGraph fg = createGaussianFactorGraph();
@ -407,7 +405,7 @@ TEST( GaussianFactorGraph, COMBINE_GRAPHS_INPLACE)
}
/* ************************************************************************* */
-TEST( GaussianFactorGraph, COMBINE_GRAPHS)
+TEST( GaussianFactorGraph, combine2)
{
  // create a test graph
  GaussianFactorGraph fg1 = createGaussianFactorGraph();
@ -591,6 +589,23 @@ TEST( GaussianFactorGraph, multiplication )
  CHECK(assert_equal(expected,actual));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, transposeMultiplication )
{
  GaussianFactorGraph A = createGaussianFactorGraph();
  Errors e;
  e += Vector_(2, 0.0, 0.0);
  e += Vector_(2,15.0, 0.0);
  e += Vector_(2, 0.0,-5.0);
  e += Vector_(2,-7.5,-5.0);

  VectorConfig expected, actual = A ^ e;
  expected.insert("l1",Vector_(2, -37.5,-50.0));
  expected.insert("x1",Vector_(2,-150.0, 25.0));
  expected.insert("x2",Vector_(2, 187.5, 25.0));
  CHECK(assert_equal(expected,actual));
}
/* ************************************************************************* */
typedef pair<Matrix,Vector> System;

View File

@ -79,9 +79,10 @@ TEST( VectorConfig, exmap)
/* ************************************************************************* */
TEST( VectorConfig, plus)
{
-  VectorConfig fg;
+  VectorConfig c;
  Vector vx = Vector_(3, 5.0, 6.0, 7.0), vy = Vector_(2, 8.0, 9.0);
-  fg.insert("x", vx).insert("y",vy);
+  c += VectorConfig("x",vx);
+  c += VectorConfig("y",vy);
  VectorConfig delta;
  Vector dx = Vector_(3, 1.0, 1.0, 1.0), dy = Vector_(2, -1.0, -1.0);
@ -92,7 +93,7 @@ TEST( VectorConfig, plus)
  expected.insert("x", wx).insert("y",wy);
  // functional
-  VectorConfig actual = fg.exmap(delta);
+  VectorConfig actual = c.exmap(delta);
  CHECK(assert_equal(expected,actual));
}