Gradient using new operator^ and errors method

release/4.3a0
Frank Dellaert 2009-12-27 12:13:31 +00:00
parent 7d1428de60
commit 266fc56dea
9 changed files with 196 additions and 138 deletions

View File

@ -175,6 +175,30 @@ void GaussianFactor::tally_separator(const string& key, set<string>& separator)
}
}
/* ************************************************************************* */
Vector GaussianFactor::operator*(const VectorConfig& x) const {
Vector Ax = zero(b_.size());
if (empty()) return Ax;
// Just iterate over all A matrices and multiply in correct config part
string j; Matrix Aj;
FOREACH_PAIR(j, Aj, As_)
Ax += (Aj * x[j]);
return ediv(Ax,sigmas_);
}
/* ************************************************************************* */
VectorConfig GaussianFactor::operator^(const Vector& e) const {
Vector E = ediv(e,sigmas_);
VectorConfig x;
// Just iterate over all A matrices and insert Ai^e into VectorConfig
string j; Matrix Aj;
FOREACH_PAIR(j, Aj, As_)
x.insert(j,Aj^E);
return x;
}
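Together these two operators give the whitened products needed for gradient computations: operator* forms A*x by multiplying each block A_j into its slice of the config and dividing element-wise by the sigmas, while operator^ whitens an error vector and maps it back onto the variables as A_j^T(e/sigma). A standalone numeric sketch of that arithmetic, using plain C++ arrays and made-up 2x2 values rather than the GTSAM Matrix/Vector types:
// Illustrative only: what operator* and operator^ compute for a single
// hypothetical 2x2 block A_j with a shared sigma (not the GTSAM API).
#include <cstdio>
int main() {
  const double sigma = 0.1;
  const double Aj[2][2] = {{-1.0, 0.0}, {0.0, -1.0}};  // hypothetical block
  const double xj[2]    = {10.0, 20.0};                // hypothetical config slice
  // operator*: accumulate A_j * x_j, then divide element-wise by sigma
  double Ax[2] = {0.0, 0.0};
  for (int i = 0; i < 2; ++i) {
    for (int k = 0; k < 2; ++k) Ax[i] += Aj[i][k] * xj[k];
    Ax[i] /= sigma;
  }
  // operator^: whiten e (divide by sigma), then apply A_j transposed
  const double e[2] = {Ax[0], Ax[1]};  // reuse A*x as the error for the demo
  double gj[2] = {0.0, 0.0};
  for (int k = 0; k < 2; ++k)
    for (int i = 0; i < 2; ++i) gj[k] += Aj[i][k] * (e[i] / sigma);
  std::printf("A*x = (%g, %g), A^e = (%g, %g)\n", Ax[0], Ax[1], gj[0], gj[1]);
  return 0;
}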
/* ************************************************************************* */
pair<Matrix,Vector> GaussianFactor::matrix(const Ordering& ordering, bool weight) const {
@ -362,22 +386,6 @@ GaussianFactor::eliminate(const string& key) const
return make_pair(conditional, factor);
}
/* ************************************************************************* */
void GaussianFactor::addGradientContribution(const VectorConfig& x,
VectorConfig& g) const {
// calculate the value of the factor
Vector e = GaussianFactor::error_vector(x);
Vector et = trans(e); // transpose
// contribute to gradient for each connected variable
string j; Matrix Aj;
FOREACH_PAIR(j, Aj, As_) {
Vector dj = trans(et*Aj); // this factor's contribution to gradient on j
Vector wdj = ediv(dj,sigmas_); // properly weight by sigmas
g.add(j,wdj);
}
}
/* ************************************************************************* */
// Creates a factor on step-size, given initial estimate and direction d, e.g.
// Factor |A1*x+A2*y-b|/sigma -> |A1*(x0+alpha*dx)+A2*(y0+alpha*dy)-b|/sigma
@ -401,18 +409,6 @@ GaussianFactor::shared_ptr GaussianFactor::alphaFactor(const VectorConfig& x,
return factor;
}
/* ************************************************************************* */
Vector GaussianFactor::operator*(const VectorConfig& x) const {
Vector Ax = zero(b_.size());
if (empty()) return Ax;
string j; Matrix Aj;
FOREACH_PAIR(j, Aj, As_)
Ax += (Aj * x[j]);
return ediv(Ax,sigmas_);
}
/* ************************************************************************* */
namespace gtsam {

View File

@ -192,6 +192,16 @@ public:
void tally_separator(const std::string& key,
std::set<std::string>& separator) const;
/**
* Return A*x
*/
Vector operator*(const VectorConfig& x) const;
/**
* Return A^e
*/
VectorConfig operator^(const Vector& e) const;
/**
* Return (dense) matrix associated with factor
* @param ordering of variables needed for matrix column order
@ -216,13 +226,6 @@ public:
boost::tuple<std::list<int>, std::list<int>, std::list<double> >
sparse(const Dimensions& columnIndices) const;
/**
* Add gradient contribution to gradient config g
* @param x: config at which to evaluate gradient
* @param g: I/O parameter, evolving gradient
*/
void addGradientContribution(const VectorConfig& x, VectorConfig& g) const;
/**
* Create a GaussianFactor on one variable 'alpha' (step size), in direction d
* @param x: starting point for search
@ -230,11 +233,6 @@ public:
*/
shared_ptr alphaFactor(const VectorConfig& x, const VectorConfig& d) const;
/**
* Return A*x
*/
Vector operator*(const VectorConfig& x) const;
/* ************************************************************************* */
// MUTABLE functions. FD: on the path to being eradicated
/* ************************************************************************* */

View File

@ -40,13 +40,37 @@ double GaussianFactorGraph::error(const VectorConfig& x) const {
}
/* ************************************************************************* */
Errors GaussianFactorGraph::operator*(const VectorConfig& x) const {
Errors GaussianFactorGraph::errors(const VectorConfig& x) const {
Errors e;
BOOST_FOREACH(sharedFactor factor,factors_)
e.push_back((*factor)*x);
e.push_back(factor->error_vector(x));
return e;
}
/* ************************************************************************* */
Errors GaussianFactorGraph::operator*(const VectorConfig& x) const {
Errors e;
BOOST_FOREACH(sharedFactor Ai,factors_)
e.push_back((*Ai)*x);
return e;
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::operator^(const Errors& e) const {
VectorConfig x;
// For each factor add the gradient contribution
size_t i=0;
BOOST_FOREACH(sharedFactor Ai,factors_)
x += (*Ai)^e[i++];
return x;
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const {
const GaussianFactorGraph& A = *this;
return A^errors(x);
}
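With those operators in place, gradient(x) is just the composition A^(A*x-b): errors(x) collects the per-factor error vectors and operator^ accumulates their transposed-Jacobian contributions into a single VectorConfig. A minimal sketch of how a caller might take one steepest-descent step with it; the header names and the fixed step size are assumptions for illustration, not part of this commit:
// Sketch only: one steepest-descent step using the new gradient machinery.
// Assumes every variable in x appears in at least one factor, so the
// gradient config has a matching key for VectorConfig::operator-.
#include "GaussianFactorGraph.h"   // header names assumed
#include "VectorConfig.h"
using namespace gtsam;
VectorConfig descentStep(const GaussianFactorGraph& A,
                         const VectorConfig& x, double alpha) {
  VectorConfig g = A.gradient(x);  // g = A^(A*x - b)
  return x - g * alpha;            // scale by alpha, then subtract per key
}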
/* ************************************************************************* */
set<string> GaussianFactorGraph::find_separator(const string& key) const
{
@ -236,15 +260,6 @@ Matrix GaussianFactorGraph::sparse(const Ordering& ordering) const {
return ijs;
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const {
VectorConfig g;
// For each factor add the gradient contribution
BOOST_FOREACH(sharedFactor factor,factors_)
factor->addGradientContribution(x,g);
return g;
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::optimalUpdate(const VectorConfig& x,
const VectorConfig& d) const {

View File

@ -13,8 +13,8 @@
#pragma once
#include <boost/shared_ptr.hpp>
#include "Errors.h"
#include "FactorGraph.h"
#include "Errors.h"
#include "GaussianFactor.h"
#include "GaussianBayesNet.h" // needed for MATLAB toolbox !!
@ -73,12 +73,25 @@ namespace gtsam {
push_back(sharedFactor(new GaussianFactor(terms,b,sigma)));
}
/** return A*x-b */
Errors errors(const VectorConfig& x) const;
/** unnormalized error */
double error(const VectorConfig& x) const;
/** return A*x */
Errors operator*(const VectorConfig& x) const;
/** return A^e */
VectorConfig operator^(const Errors& e) const;
/**
* Calculate the gradient of 0.5*|A*x-b|^2 at a given config, i.e. A^(A*x-b)
* @param x: VectorConfig specifying where to calculate gradient
* @return gradient, as a VectorConfig as well
*/
VectorConfig gradient(const VectorConfig& x) const;
/** Unnormalized probability. O(n) */
double probPrime(const VectorConfig& c) const {
return exp(-0.5 * error(c));
@ -164,13 +177,6 @@ namespace gtsam {
*/
Matrix sparse(const Ordering& ordering) const;
/**
* Calculate Gradient of 0.5*|Ax-b| for a given config
* @param x: VectorConfig specifying where to calculate gradient
* @return gradient, as a VectorConfig as well
*/
VectorConfig gradient(const VectorConfig& x) const;
/**
* Take an optimal step in direction d by calculating optimal step-size
* @param x: starting point for search

View File

@ -69,21 +69,31 @@ VectorConfig VectorConfig::operator*(double s) const {
return scale(s);
}
/* ************************************************************************* */
void VectorConfig::operator+=(const VectorConfig& b) {
string j; Vector b_j;
FOREACH_PAIR(j, b_j, b.values) {
iterator it = values.find(j);
if (it==values.end())
insert(j, b_j);
else
it->second += b_j;
}
}
/* ************************************************************************* */
VectorConfig VectorConfig::operator+(const VectorConfig& b) const {
VectorConfig result;
string key; Vector v;
FOREACH_PAIR(key, v, values)
result.insert(key, v + b.get(key));
VectorConfig result = *this;
result += b;
return result;
}
/* ************************************************************************* */
VectorConfig VectorConfig::operator-(const VectorConfig& b) const {
VectorConfig result;
string key; Vector v;
FOREACH_PAIR(key, v, values)
result.insert(key, v - b.get(key));
string j; Vector v;
FOREACH_PAIR(j, v, values)
result.insert(j, v - b.get(j));
return result;
}
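The new operator+= merges a config in place: keys already present have the incoming vector added element-wise, new keys are simply inserted, and operator+ is now expressed as copy-then-+=. A small usage sketch, assuming the Vector_ helper used throughout the unit tests:
VectorConfig a("x", Vector_(2, 1.0, 2.0));
a += VectorConfig("y", Vector_(2, 3.0, 4.0));  // new key "y" is inserted
a += VectorConfig("x", Vector_(2, 1.0, 1.0));  // existing "x" becomes (2,3)
VectorConfig b = a + VectorConfig("y", Vector_(2, 1.0, 1.0));  // copy of a, then +=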

View File

@ -30,6 +30,7 @@ namespace gtsam {
VectorConfig() {}
VectorConfig(const VectorConfig& cfg_in): values(cfg_in.values) {}
VectorConfig(const std::string& j, const Vector& a) { add(j,a); }
virtual ~VectorConfig() {}
@ -78,6 +79,9 @@ namespace gtsam {
VectorConfig scale(double s) const;
VectorConfig operator*(double s) const;
/** Add in place */
void operator+=(const VectorConfig &b);
/** Add */
VectorConfig operator+(const VectorConfig &b) const;

View File

@ -26,20 +26,10 @@ using namespace gtsam;
/* ************************************************************************* */
TEST( GaussianFactor, linearFactor )
{
Matrix I = eye(2);
Vector b = Vector_(2,0.2,-0.1);
double sigma = 0.1;
Matrix A1(2,2);
A1(0,0) = -1.0 ; A1(0,1) = 0.0;
A1(1,0) = 0.0 ; A1(1,1) = -1.0;
Matrix A2(2,2);
A2(0,0) = 1.0 ; A2(0,1) = 0.0;
A2(1,0) = 0.0 ; A2(1,1) = 1.0;
Vector b(2);
b(0) = 0.2 ; b(1) = -0.1;
GaussianFactor expected("x1", A1, "x2", A2, b, sigma);
GaussianFactor expected("x1", -I, "x2", I, b, sigma);
// create a small linear factor graph
GaussianFactorGraph fg = createGaussianFactorGraph();
@ -51,6 +41,29 @@ TEST( GaussianFactor, linearFactor )
CHECK(assert_equal(expected,*lf));
}
/* ************************************************************************* */
TEST( GaussianFactor, operators )
{
Matrix I = eye(2);
Vector b = Vector_(2,0.2,-0.1);
double sigma = 0.1;
GaussianFactor lf("x1", -I, "x2", I, b, sigma);
VectorConfig c;
c.insert("x1",Vector_(2,10.,20.));
c.insert("x2",Vector_(2,30.,60.));
// test A*x
Vector expectedE = Vector_(2,200.,400.), e = lf*c;
CHECK(assert_equal(expectedE,e));
// test A^e
VectorConfig expectedX;
expectedX.insert("x1",Vector_(2,-2000.,-4000.));
expectedX.insert("x2",Vector_(2, 2000., 4000.));
CHECK(assert_equal(expectedX,lf^e));
}
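The expected values follow directly from the two definitions: with A1 = -I, A2 = I and sigma = 0.1, A*x = x2 - x1 = (20,40), which whitened by 1/sigma gives e = (200,400). operator^ whitens e again and applies each block transposed, so the x1 slot receives -e/sigma = (-2000,-4000) and the x2 slot +e/sigma = (2000,4000).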
/* ************************************************************************* */
TEST( GaussianFactor, keys )
{

View File

@ -95,18 +95,18 @@ TEST( GaussianFactorGraph, combine_factors_x1 )
Matrix Ax1 = Matrix_(6,2,
1., 0.,
0.00, 1.,
0., 1.,
-1., 0.,
0.00,-1.,
0.,-1.,
-1., 0.,
00., -1.
0.,-1.
);
Matrix Ax2 = Matrix_(6,2,
0., 0.,
0., 0.,
1., 0.,
+0.,1.,
0., 1.,
0., 0.,
0., 0.
);
@ -157,17 +157,17 @@ TEST( GaussianFactorGraph, combine_factors_x2 )
Matrix Ax1 = Matrix_(4,2,
// x1
-1., 0., // f2
0.00,-1., // f2
0.00, 0., // f4
0.00, 0. // f4
0.,-1., // f2
0., 0., // f4
0., 0. // f4
);
Matrix Ax2 = Matrix_(4,2,
// x2
1., 0.,
+0.,1.,
0., 1.,
-1., 0.,
+0.,-1.
0.,-1.
);
// the expected RHS vector
@ -188,7 +188,6 @@ TEST( GaussianFactorGraph, combine_factors_x2 )
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x1 )
{
GaussianFactorGraph fg = createGaussianFactorGraph();
@ -203,7 +202,6 @@ TEST( GaussianFactorGraph, eliminateOne_x1 )
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateOne_x2 )
{
GaussianFactorGraph fg = createGaussianFactorGraph();
@ -360,7 +358,7 @@ TEST( GaussianFactorGraph, CONSTRUCTOR_GaussianBayesNet )
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, GET_ORDERING)
TEST( GaussianFactorGraph, getOrdering)
{
Ordering expected;
expected += "l1","x1","x2";
@ -370,7 +368,7 @@ TEST( GaussianFactorGraph, GET_ORDERING)
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, OPTIMIZE )
TEST( GaussianFactorGraph, optimize )
{
// create a graph
GaussianFactorGraph fg = createGaussianFactorGraph();
@ -407,7 +405,7 @@ TEST( GaussianFactorGraph, COMBINE_GRAPHS_INPLACE)
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, COMBINE_GRAPHS)
TEST( GaussianFactorGraph, combine2)
{
// create a test graph
GaussianFactorGraph fg1 = createGaussianFactorGraph();
@ -591,6 +589,23 @@ TEST( GaussianFactorGraph, multiplication )
CHECK(assert_equal(expected,actual));
}
/* ************************************************************************* */
TEST( GaussianFactorGraph, transposeMultiplication )
{
GaussianFactorGraph A = createGaussianFactorGraph();
Errors e;
e += Vector_(2, 0.0, 0.0);
e += Vector_(2,15.0, 0.0);
e += Vector_(2, 0.0,-5.0);
e += Vector_(2,-7.5,-5.0);
VectorConfig expected, actual = A ^ e;
expected.insert("l1",Vector_(2, -37.5,-50.0));
expected.insert("x1",Vector_(2,-150.0, 25.0));
expected.insert("x2",Vector_(2, 187.5, 25.0));
CHECK(assert_equal(expected,actual));
}
/* ************************************************************************* */
typedef pair<Matrix,Vector> System;

View File

@ -79,9 +79,10 @@ TEST( VectorConfig, exmap)
/* ************************************************************************* */
TEST( VectorConfig, plus)
{
VectorConfig fg;
VectorConfig c;
Vector vx = Vector_(3, 5.0, 6.0, 7.0), vy = Vector_(2, 8.0, 9.0);
fg.insert("x", vx).insert("y",vy);
c += VectorConfig("x",vx);
c += VectorConfig("y",vy);
VectorConfig delta;
Vector dx = Vector_(3, 1.0, 1.0, 1.0), dy = Vector_(2, -1.0, -1.0);
@ -92,7 +93,7 @@ TEST( VectorConfig, plus)
expected.insert("x", wx).insert("y",wy);
// functional
VectorConfig actual = fg.exmap(delta);
VectorConfig actual = c.exmap(delta);
CHECK(assert_equal(expected,actual));
}