First cut at a raw-memory MultiplyHessianAdd for HessianFactor and JacobianFactor. The unit test multiplyHessianAdd3 in testGaussianFactorGraphUnordered passes. Note that the interface currently needs the accumulated dimensions of the key variables; see GaussianFactorGraph::multiplyHessianAdd(double alpha, const double* x, double* y).
parent 63f8c75fb2
commit b464b808ef
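For context, a minimal usage sketch of the raw-memory interface introduced here; it mirrors the new multiplyHessianAdd3 test further down. The 6-element size and the helper name rawHessianMultiplyExample are illustrative assumptions, not part of the commit:

    #include <gtsam/linear/GaussianFactorGraph.h>

    using namespace gtsam;

    // Sketch: y += alpha * A'A * x with x and y as flat raw arrays.
    // The per-variable offsets (prefix sums of the variable dimensions)
    // are computed internally via GaussianFactorGraph::getkeydim().
    void rawHessianMultiplyExample(const GaussianFactorGraph& gfg) {
      Vector x(6); x << 1, 2, 3, 4, 5, 6;  // assumes 3 variables of dimension 2
      Vector y = gtsam::zero(6);           // gtsam::zero, as used in the new test
      gfg.multiplyHessianAdd(1.0, x.data(), y.data());  // y += 1.0 * A'A * x
    }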
@@ -118,6 +118,12 @@ namespace gtsam {
     /// y += alpha * A'*A*x
     virtual void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const = 0;
 
+    /// y += alpha * A'*A*x
+    virtual void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const = 0;
+
+    /// y += alpha * A'*A*x
+    virtual void multiplyHessianAdd(double alpha, const double* x, double* y) const = 0;
+
     /// A'*b for Jacobian, eta for Hessian
     virtual VectorValues gradientAtZero() const = 0;
@@ -54,6 +54,27 @@ namespace gtsam {
     return keys;
   }
 
+  /* ************************************************************************* */
+  vector<size_t> GaussianFactorGraph::getkeydim() const {
+    // First find dimensions of each variable
+    vector<size_t> dims;
+    BOOST_FOREACH(const sharedFactor& factor, *this) {
+      for (GaussianFactor::const_iterator pos = factor->begin();
+          pos != factor->end(); ++pos) {
+        if (dims.size() <= *pos)
+          dims.resize(*pos + 1, 0);
+        dims[*pos] = factor->getDim(pos);
+      }
+    }
+    // Find accumulated dimensions for variables
+    vector<size_t> dims_accumulated;
+    dims_accumulated.resize(dims.size() + 1, 0);
+    dims_accumulated[0] = 0;
+    for (size_t i = 1; i < dims_accumulated.size(); i++)
+      dims_accumulated[i] = dims_accumulated[i - 1] + dims[i - 1];
+    return dims_accumulated;
+  }
+
   /* ************************************************************************* */
   GaussianFactorGraph GaussianFactorGraph::clone() const {
     GaussianFactorGraph result;
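As an aside (not part of the commit), the prefix-sum layout computed by getkeydim() can be restated standalone; like the implementation above, it assumes keys are contiguous 0..n-1:

    #include <cstddef>
    #include <vector>

    // With per-variable dimensions {2, 2, 2}, the result is {0, 2, 4, 6}:
    // the variable with key k occupies entries [offsets[k], offsets[k+1]) of
    // the raw x and y arrays passed to the new multiplyHessianAdd overloads.
    std::vector<size_t> accumulateDims(const std::vector<size_t>& dims) {
      std::vector<size_t> offsets(dims.size() + 1, 0);
      for (size_t i = 1; i < offsets.size(); ++i)
        offsets[i] = offsets[i - 1] + dims[i - 1];
      return offsets;
    }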
@@ -310,7 +331,16 @@ namespace gtsam {
   void GaussianFactorGraph::multiplyHessianAdd(double alpha,
       const VectorValues& x, VectorValues& y) const {
     BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this)
       f->multiplyHessianAdd(alpha, x, y);
+  }
+
+  /* ************************************************************************* */
+  void GaussianFactorGraph::multiplyHessianAdd(double alpha,
+      const double* x, double* y) const {
+    vector<size_t> FactorKeys = getkeydim();
+    BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this)
+      f->multiplyHessianAdd(alpha, x, y, FactorKeys);
+
   }
 
   /* ************************************************************************* */
@@ -135,6 +135,8 @@ namespace gtsam {
     typedef FastSet<Key> Keys;
     Keys keys() const;
 
+    std::vector<size_t> getkeydim() const;
+
     /** unnormalized error */
     double error(const VectorValues& x) const {
       double total_error = 0.;
@@ -296,6 +298,10 @@ namespace gtsam {
     void multiplyHessianAdd(double alpha, const VectorValues& x,
         VectorValues& y) const;
 
+    ///** y += alpha*A'A*x */
+    void multiplyHessianAdd(double alpha, const double* x,
+        double* y) const;
+
     ///** In-place version e <- A*x that overwrites e. */
     void multiplyInPlace(const VectorValues& x, Errors& e) const;
@@ -525,7 +525,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
 
   // copy to yvalues
   for(DenseIndex i = 0; i < (DenseIndex)size(); ++i) {
     bool didNotExist;
     VectorValues::iterator it;
     boost::tie(it, didNotExist) = yvalues.tryInsert(keys_[i], Vector());
     if (didNotExist)
@@ -535,6 +535,38 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
   }
 }
 
+/* ************************************************************************* */
+void HessianFactor::multiplyHessianAdd(double alpha, const double* x,
+    double* yvalues, vector<size_t> keys) const {
+
+  // Create a vector of temporary y values, corresponding to rows i
+  vector<Vector> y;
+  y.reserve(size());
+  for (const_iterator it = begin(); it != end(); it++)
+    y.push_back(zero(getDim(it)));
+
+  // Accessing the VectorValues one by one is expensive
+  // So we will loop over columns to access x only once per column
+  // And fill the above temporary y values, to be added into yvalues after
+  for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
+    DenseIndex i = 0;
+    for (; i < j; ++i)
+      y[i] += info_(i, j).knownOffDiagonal() * ConstDMap(x+keys[keys_[j]],keys[keys_[j]+1]-keys[keys_[j]]);
+    // blocks on the diagonal are only half
+    y[i] += info_(j, j).selfadjointView() * ConstDMap(x+keys[keys_[j]],keys[keys_[j]+1]-keys[keys_[j]]);
+    // for below diagonal, we take transpose block from upper triangular part
+    for (i = j + 1; i < (DenseIndex)size(); ++i)
+      y[i] += info_(i, j).knownOffDiagonal() * ConstDMap(x+keys[keys_[j]],keys[keys_[j]+1]-keys[keys_[j]]);
+  }
+
+  // copy to yvalues
+  for(DenseIndex i = 0; i < (DenseIndex)size(); ++i)
+    DMap(yvalues+keys[keys_[i]],keys[keys_[i]+1]-keys[keys_[i]]) += alpha * y[i];
+
+}
+
 /* ************************************************************************* */
 VectorValues HessianFactor::gradientAtZero() const {
   VectorValues g;
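The column loop above reads only the stored upper-triangular blocks of the symmetric info_ matrix. As a standalone illustration of the same traversal (plain Eigen, not GTSAM; block count, block size, and values are assumptions):

    #include <Eigen/Dense>
    #include <cassert>
    #include <vector>

    int main() {
      const int n = 3, d = 2;  // 3 variables of dimension 2 (assumed)
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(n * d, n * d);
      Eigen::MatrixXd H = A.transpose() * A;  // symmetric; only upper blocks are read below
      Eigen::VectorXd x = Eigen::VectorXd::LinSpaced(n * d, 1, n * d);
      const double alpha = 1.0;

      // Column loop: above-diagonal blocks as stored, diagonal block as
      // self-adjoint, below-diagonal as transposes of the stored upper blocks.
      std::vector<Eigen::VectorXd> tmp(n, Eigen::VectorXd::Zero(d));
      for (int j = 0; j < n; ++j) {
        const Eigen::VectorXd xj = x.segment(j * d, d);
        for (int i = 0; i < j; ++i)
          tmp[i] += H.block(i * d, j * d, d, d) * xj;
        tmp[j] += H.block(j * d, j * d, d, d).selfadjointView<Eigen::Upper>() * xj;
        for (int i = j + 1; i < n; ++i)
          tmp[i] += H.block(j * d, i * d, d, d).transpose() * xj;
      }
      Eigen::VectorXd y = Eigen::VectorXd::Zero(n * d);
      for (int i = 0; i < n; ++i)
        y.segment(i * d, d) += alpha * tmp[i];

      // Check against the dense symmetric product y = alpha * H * x.
      assert((y - alpha * H * x).norm() < 1e-9);
      return 0;
    }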
@@ -141,6 +141,12 @@ namespace gtsam {
     typedef SymmetricBlockMatrix::Block Block; ///< A block from the Hessian matrix
     typedef SymmetricBlockMatrix::constBlock constBlock; ///< A block from the Hessian matrix (const version)
 
+
+    // Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
+    typedef Eigen::Map<DVector> DMap;
+    typedef Eigen::Map<const DVector> ConstDMap;
+
     /** default constructor for I/O */
     HessianFactor();
@@ -376,6 +382,10 @@ namespace gtsam {
     /** y += alpha * A'*A*x */
     void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
 
+    void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
+
+    void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
+
     /// eta for Hessian
     VectorValues gradientAtZero() const;
@@ -495,6 +495,7 @@ namespace gtsam {
       if(xi.second)
        xi.first->second = Vector::Zero(getDim(begin() + pos));
       gtsam::transposeMultiplyAdd(Ab_(pos), E, xi.first->second);
+
     }
   }
@@ -505,6 +506,26 @@ namespace gtsam {
   transposeMultiplyAdd(alpha,Ax,y);
 }
 
+void JacobianFactor::multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const {
+  if (empty()) return;
+  Vector Ax = zero(Ab_.rows());
+
+  // Just iterate over all A matrices and multiply in correct config part
+  for(size_t pos=0; pos<size(); ++pos)
+    Ax += Ab_(pos) * ConstDMap(x + keys[keys_[pos]],keys[keys_[pos]+1]-keys[keys_[pos]]);
+
+  // Deal with noise properly: whiten twice, as we are dividing by the variance
+  if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
+
+  // multiply with alpha
+  Ax *= alpha;
+
+  // Again iterate over all A matrices and add Ai'*e into y
+  for(size_t pos=0; pos<size(); ++pos)
+    DMap(y + keys[keys_[pos]],keys[keys_[pos]+1]-keys[keys_[pos]]) += Ab_(pos).transpose() * Ax;
+
+}
+
 /* ************************************************************************* */
 VectorValues JacobianFactor::gradientAtZero() const {
   VectorValues g;
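On the double whitening above: applying whitenInPlace twice divides Ax by the variances, i.e. it applies Sigma^{-1}, which is what y += alpha * A' Sigma^{-1} A x requires. A minimal sketch with an assumed diagonal noise model (plain Eigen, not GTSAM):

    #include <Eigen/Dense>
    #include <cassert>

    int main() {
      Eigen::Vector2d sigmas(0.5, 2.0);  // assumed noise standard deviations
      Eigen::Vector2d Ax(1.0, 4.0);      // assumed value of A*x

      Eigen::Vector2d whitened = Ax.cwiseQuotient(sigmas);  // first whitening
      whitened = whitened.cwiseQuotient(sigmas);            // second whitening
      Eigen::Vector2d viaInformation = Ax.cwiseQuotient(sigmas.cwiseAbs2());
      assert((whitened - viaInformation).norm() < 1e-12);   // identical results
      return 0;
    }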
@@ -96,6 +96,12 @@ namespace gtsam {
     typedef ABlock::ColXpr BVector;
     typedef constABlock::ConstColXpr constBVector;
 
+    // Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
+    typedef Eigen::Map<DVector> DMap;
+    typedef Eigen::Map<const DVector> ConstDMap;
+
+
     /** Convert from other GaussianFactor */
     explicit JacobianFactor(const GaussianFactor& gf);
@@ -275,6 +281,10 @@ namespace gtsam {
     /** y += alpha * A'*A*x */
     void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
 
+    void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
+
+    void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
+
     /// A'*b for Jacobian
     VectorValues gradientAtZero() const;
@@ -166,9 +166,9 @@ static GaussianFactorGraph createSimpleGaussianFactorGraph() {
   // linearized prior on x1: c[_x1_]+x1=0 i.e. x1=-c[_x1_]
   fg += JacobianFactor(2, 10*eye(2), -1.0*ones(2), unit2);
   // odometry between x1 and x2: x2-x1=[0.2;-0.1]
-  fg += JacobianFactor(2, -10*eye(2), 0, 10*eye(2), (Vector(2) << 2.0, -1.0), unit2);
+  fg += JacobianFactor(0, 10*eye(2), 2, -10*eye(2), (Vector(2) << 2.0, -1.0), unit2);
   // measurement between x1 and l1: l1-x1=[0.0;0.2]
-  fg += JacobianFactor(2, -5*eye(2), 1, 5*eye(2), (Vector(2) << 0.0, 1.0), unit2);
+  fg += JacobianFactor(1, 5*eye(2), 2, -5*eye(2), (Vector(2) << 0.0, 1.0), unit2);
   // measurement between x2 and l1: l1-x2=[-0.2;0.3]
   fg += JacobianFactor(0, -5*eye(2), 1, 5*eye(2), (Vector(2) << -1.0, 1.5), unit2);
   return fg;
@@ -313,6 +313,31 @@ TEST( GaussianFactorGraph, multiplyHessianAdd2 )
   EXPECT(assert_equal(2*expected, actual));
 }
 
+/* ************************************************************************* */
+TEST( GaussianFactorGraph, multiplyHessianAdd3 )
+{
+  GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
+
+  // brute force
+  Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
+  Vector X(6); X<<1,2,3,4,5,6;
+  Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
+  EXPECT(assert_equal(Y,AtA*X));
+
+  double* x = &X[0];
+  double* y = &Y[0];
+
+  Vector fast_y = gtsam::zero(6);
+  double* actual = &fast_y[0];
+  gfg.multiplyHessianAdd(1.0, x, fast_y.data());
+  EXPECT(assert_equal(Y, fast_y));
+
+  // now, do it with non-zero y
+  gfg.multiplyHessianAdd(1.0, x, fast_y.data());
+  EXPECT(assert_equal(2*Y, fast_y));
+
+}
+
 /* ************************************************************************* */
 TEST( GaussianFactorGraph, matricesMixed )