Merged in feature/RegularFactors (pull request #68)
Regular (raw memory access) Factors

commit d79b9fc04b
@@ -100,7 +100,7 @@ namespace gtsam {
     /// Return the diagonal of the Hessian for this factor
     virtual VectorValues hessianDiagonal() const = 0;

-    /// Return the diagonal of the Hessian for this factor (raw memory version)
+    /// Raw memory access version of hessianDiagonal
     virtual void hessianDiagonal(double* d) const = 0;

     /// Return the block diagonal of the Hessian for this factor
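A note on the raw memory convention used throughout this PR: the double* argument points at a caller-owned flat buffer with one block per variable, laid out by key, so the block for key j starts at d + D * j (D is hard-coded to 9 in the generic implementations this PR removes, and templated in the new Regular factors). A minimal caller sketch, assuming three variables of dimension 9 with keys 0..2 and a factor whose concrete type implements the raw overload:

    // Hypothetical caller; `factor` must be one of the Regular factors after this PR.
    std::vector<double> d(3 * 9, 0.0);  // 3 variables x dim 9, zero-initialized
    factor.hessianDiagonal(d.data());   // accumulates (+=) into the buffer
    // the diagonal block for key j now sits at d[9*j] .. d[9*j + 8]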
@@ -122,16 +122,10 @@ namespace gtsam {
     /// y += alpha * A'*A*x
     virtual void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const = 0;

-    /// y += alpha * A'*A*x
-    virtual void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const = 0;
-
-    /// y += alpha * A'*A*x
-    virtual void multiplyHessianAdd(double alpha, const double* x, double* y) const = 0;
-
     /// A'*b for Jacobian, eta for Hessian
     virtual VectorValues gradientAtZero() const = 0;

-    /// A'*b for Jacobian, eta for Hessian (raw memory version)
+    /// Raw memory access version of gradientAtZero
     virtual void gradientAtZero(double* d) const = 0;

     /// Gradient wrt a key at any values
@@ -357,15 +357,6 @@ namespace gtsam {
     f->multiplyHessianAdd(alpha, x, y);
 }

-/* ************************************************************************* */
-void GaussianFactorGraph::multiplyHessianAdd(double alpha,
-    const double* x, double* y) const {
-  vector<size_t> FactorKeys = getkeydim();
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& f, *this)
-    f->multiplyHessianAdd(alpha, x, y, FactorKeys);
-
-}
-
 /* ************************************************************************* */
 void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, Errors& e) const {
   multiplyInPlace(x, e.begin());
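With this graph-level raw overload gone (getkeydim() supplied the key-to-offset table FactorKeys), a caller that still wants the raw path loops over the factors itself. A sketch of the equivalent pattern, assuming every factor in the graph is a RegularHessianFactor<9>:

    // Hedged sketch, valid only under the stated assumption about factor types.
    void rawMultiplyHessianAdd(const GaussianFactorGraph& gfg, double alpha,
                               const double* x, double* y) {
      BOOST_FOREACH(const GaussianFactor::shared_ptr& f, gfg)
        boost::static_pointer_cast<RegularHessianFactor<9> >(f)
            ->multiplyHessianAdd(alpha, x, y);  // fixed-dimension raw overload
    }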
@@ -310,10 +310,6 @@ namespace gtsam {
     void multiplyHessianAdd(double alpha, const VectorValues& x,
         VectorValues& y) const;

-    ///** y += alpha*A'A*x */
-    void multiplyHessianAdd(double alpha, const double* x,
-        double* y) const;
-
     ///** In-place version e <- A*x that overwrites e. */
     void multiplyInPlace(const VectorValues& x, Errors& e) const;

@@ -359,20 +359,10 @@ VectorValues HessianFactor::hessianDiagonal() const {
 }

 /* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+// Raw memory access version should be called in Regular Factors only currently
 void HessianFactor::hessianDiagonal(double* d) const {
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, 9, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-
-  // Loop over all variables in the factor
-  for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
-    Key j = keys_[pos];
-    // Get the diagonal block, and insert its diagonal
-    const Matrix& B = info_(pos, pos).selfadjointView();
-    DMap(d + 9 * j) += B.diagonal();
-  }
+  throw std::runtime_error(
+      "HessianFactor::hessianDiagonal raw memory access is allowed for Regular Factors only");
 }

 /* ************************************************************************* */
@@ -548,48 +538,6 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
   }
 }

-/* ************************************************************************* */
-void HessianFactor::multiplyHessianAdd(double alpha, const double* x,
-    double* yvalues, vector<size_t> offsets) const {
-
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-  typedef Eigen::Map<const DVector> ConstDMap;
-
-  // Create a vector of temporary y values, corresponding to rows i
-  vector<Vector> y;
-  y.reserve(size());
-  for (const_iterator it = begin(); it != end(); it++)
-    y.push_back(zero(getDim(it)));
-
-  // Accessing the VectorValues one by one is expensive
-  // So we will loop over columns to access x only once per column
-  // And fill the above temporary y values, to be added into yvalues after
-  for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
-    DenseIndex i = 0;
-    for (; i < j; ++i)
-      y[i] += info_(i, j).knownOffDiagonal()
-          * ConstDMap(x + offsets[keys_[j]],
-              offsets[keys_[j] + 1] - offsets[keys_[j]]);
-    // blocks on the diagonal are only half
-    y[i] += info_(j, j).selfadjointView()
-        * ConstDMap(x + offsets[keys_[j]],
-            offsets[keys_[j] + 1] - offsets[keys_[j]]);
-    // for below diagonal, we take transpose block from upper triangular part
-    for (i = j + 1; i < (DenseIndex) size(); ++i)
-      y[i] += info_(i, j).knownOffDiagonal()
-          * ConstDMap(x + offsets[keys_[j]],
-              offsets[keys_[j] + 1] - offsets[keys_[j]]);
-  }
-
-  // copy to yvalues
-  for (DenseIndex i = 0; i < (DenseIndex) size(); ++i)
-    DMap(yvalues + offsets[keys_[i]], offsets[keys_[i] + 1] - offsets[keys_[i]]) +=
-        alpha * y[i];
-}
-
 /* ************************************************************************* */
 VectorValues HessianFactor::gradientAtZero() const {
   VectorValues g;
@@ -600,20 +548,10 @@ VectorValues HessianFactor::gradientAtZero() const {
 }

 /* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+// Raw memory access version should be called in Regular Factors only currently
 void HessianFactor::gradientAtZero(double* d) const {
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, 9, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-
-  // Loop over all variables in the factor
-  for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
-    Key j = keys_[pos];
-    // Get the diagonal block, and insert its diagonal
-    DVector dj = -info_(pos,size()).knownOffDiagonal();
-    DMap(d + 9 * j) += dj;
-  }
+  throw std::runtime_error(
+      "HessianFactor::gradientAtZero raw memory access is allowed for Regular Factors only");
 }

 /* ************************************************************************* */
@@ -340,7 +340,7 @@ namespace gtsam {
     /// Return the diagonal of the Hessian for this factor
     virtual VectorValues hessianDiagonal() const;

-    /* ************************************************************************* */
+    /// Raw memory access version of hessianDiagonal
     virtual void hessianDiagonal(double* d) const;

     /// Return the block diagonal of the Hessian for this factor
@@ -380,13 +380,10 @@ namespace gtsam {
     /** y += alpha * A'*A*x */
     void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;

-    void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
-
-    void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
-
     /// eta for Hessian
     VectorValues gradientAtZero() const;

+    /// Raw memory access version of gradientAtZero
     virtual void gradientAtZero(double* d) const;

     /**
@@ -461,22 +461,9 @@ VectorValues JacobianFactor::hessianDiagonal() const {
 }

 /* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+// Raw memory access version should be called in Regular Factors only currently
 void JacobianFactor::hessianDiagonal(double* d) const {
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, 9, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-
-  // Loop over all variables in the factor
-  for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
-    // Get the diagonal block, and insert its diagonal
-    DVector dj;
-    for (size_t k = 0; k < 9; ++k)
-      dj(k) = Ab_(j).col(k).squaredNorm();
-    DMap(d + 9 * j) += dj;
-  }
+  throw std::runtime_error("JacobianFactor::hessianDiagonal raw memory access is allowed for Regular Factors only");
 }

 /* ************************************************************************* */
@@ -528,40 +515,6 @@ void JacobianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
   transposeMultiplyAdd(alpha, Ax, y);
 }

-void JacobianFactor::multiplyHessianAdd(double alpha, const double* x,
-    double* y, std::vector<size_t> keys) const {
-
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-  typedef Eigen::Map<const DVector> ConstDMap;
-
-  if (empty())
-    return;
-  Vector Ax = zero(Ab_.rows());
-
-  // Just iterate over all A matrices and multiply in correct config part
-  for (size_t pos = 0; pos < size(); ++pos)
-    Ax += Ab_(pos)
-        * ConstDMap(x + keys[keys_[pos]],
-            keys[keys_[pos] + 1] - keys[keys_[pos]]);
-
-  // Deal with noise properly, need to Double* whiten as we are dividing by variance
-  if (model_) {
-    model_->whitenInPlace(Ax);
-    model_->whitenInPlace(Ax);
-  }
-
-  // multiply with alpha
-  Ax *= alpha;
-
-  // Again iterate over all A matrices and insert Ai^e into y
-  for (size_t pos = 0; pos < size(); ++pos)
-    DMap(y + keys[keys_[pos]], keys[keys_[pos] + 1] - keys[keys_[pos]]) += Ab_(
-        pos).transpose() * Ax;
-}
-
 /* ************************************************************************* */
 VectorValues JacobianFactor::gradientAtZero() const {
   VectorValues g;
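A side note on the double model_->whitenInPlace(Ax) in the raw versions (removed here, reintroduced in RegularJacobianFactor below): with whitening matrix W = Sigma^{-1/2}, A' Sigma^{-1} A x = (WA)'(WA) x, so after forming Ax the product must be whitened twice before being mapped back through A'. A tiny standalone check of that identity, assuming a diagonal model with sigma s (so whitening is division by s):

    #include <Eigen/Dense>
    #include <cassert>
    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 3);
      Eigen::VectorXd x = Eigen::VectorXd::Random(3);
      double s = 0.5;                            // sigma of the diagonal noise model
      Eigen::VectorXd Ax = A * x;
      Ax /= s; Ax /= s;                          // "whiten twice" == divide by variance s^2
      Eigen::VectorXd lhs = A.transpose() * Ax;  // A' * Sigma^{-1} * A * x
      Eigen::VectorXd rhs = A.transpose() * (A * x) / (s * s);
      assert((lhs - rhs).norm() < 1e-12);
      return 0;
    }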
@@ -574,8 +527,9 @@ VectorValues JacobianFactor::gradientAtZero() const {
 }

 /* ************************************************************************* */
+// Raw memory access version should be called in Regular Factors only currently
 void JacobianFactor::gradientAtZero(double* d) const {
-  throw std::runtime_error("Raw memory version of gradientAtZero not implemented for Jacobian factor");
+  throw std::runtime_error("JacobianFactor::gradientAtZero raw memory access is allowed for Regular Factors only");
 }

 /* ************************************************************************* */
@@ -283,10 +283,6 @@ namespace gtsam {
     /** y += alpha * A'*A*x */
     void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;

-    void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
-
-    void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
-
     /// A'*b for Jacobian
     VectorValues gradientAtZero() const;

@@ -126,7 +126,9 @@ void BlockJacobiPreconditioner::transposeSolve(const Vector& y, Vector& x) const
 void BlockJacobiPreconditioner::build(
   const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map<Key,Vector> &lambda)
 {
+  // n is the number of keys
   const size_t n = keyInfo.size();
+  // dims_ is a vector that contains the dimension of keys
   dims_ = keyInfo.colSpec();

   /* prepare the buffer of block diagonals */
@@ -315,30 +315,6 @@ TEST( GaussianFactorGraph, multiplyHessianAdd2 )
   EXPECT(assert_equal(2*expected, actual));
 }

-/* ************************************************************************* */
-TEST( GaussianFactorGraph, multiplyHessianAdd3 )
-{
-  GaussianFactorGraph gfg = createGaussianFactorGraphWithHessianFactor();
-
-  // brute force
-  Matrix AtA; Vector eta; boost::tie(AtA,eta) = gfg.hessian();
-  Vector X(6); X<<1,2,3,4,5,6;
-  Vector Y(6); Y<<-450, -450, 300, 400, 2950, 3450;
-  EXPECT(assert_equal(Y,AtA*X));
-
-  double* x = &X[0];
-
-  Vector fast_y = gtsam::zero(6);
-  gfg.multiplyHessianAdd(1.0, x, fast_y.data());
-  EXPECT(assert_equal(Y, fast_y));
-
-  // now, do it with non-zero y
-  gfg.multiplyHessianAdd(1.0, x, fast_y.data());
-  EXPECT(assert_equal(2*Y, fast_y));
-}
-
 /* ************************************************************************* */
 TEST( GaussianFactorGraph, matricesMixed )
 {
@@ -351,7 +327,6 @@ TEST( GaussianFactorGraph, matricesMixed )
   EXPECT(assert_equal(A.transpose()*b, eta));
 }

-
 /* ************************************************************************* */
 TEST( GaussianFactorGraph, gradientAtZero )
 {
@@ -51,18 +51,12 @@ public:
       HessianFactor(keys, augmentedInformation) {
   }

-  /** y += alpha * A'*A*x */
-  void multiplyHessianAdd(double alpha, const VectorValues& x,
-      VectorValues& y) const {
-    HessianFactor::multiplyHessianAdd(alpha, x, y);
-  }
-
   // Scratch space for multiplyHessianAdd
   typedef Eigen::Matrix<double, D, 1> DVector;
   mutable std::vector<DVector> y;

-  void multiplyHessianAdd(double alpha, const double* x,
-      double* yvalues) const {
+  /** y += alpha * A'*A*x */
+  void multiplyHessianAdd(double alpha, const double* x, double* yvalues) const {
     // Create a vector of temporary y values, corresponding to rows i
     y.resize(size());
     BOOST_FOREACH(DVector & yi, y)
@@ -95,6 +89,83 @@ public:
     }
   }

+  /** Raw memory access version of multiplyHessianAdd, y += alpha * A'*A*x,
+   *  with per-key offsets to support variable dimensions */
+  void multiplyHessianAdd(double alpha, const double* x, double* yvalues,
+      std::vector<size_t> offsets) const {
+
+    // Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
+    typedef Eigen::Map<DVector> DMap;
+    typedef Eigen::Map<const DVector> ConstDMap;
+
+    // Create a vector of temporary y values, corresponding to rows i
+    std::vector<Vector> y;
+    y.reserve(size());
+    for (const_iterator it = begin(); it != end(); it++)
+      y.push_back(zero(getDim(it)));
+
+    // Accessing the VectorValues one by one is expensive
+    // So we will loop over columns to access x only once per column
+    // And fill the above temporary y values, to be added into yvalues after
+    for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
+      DenseIndex i = 0;
+      for (; i < j; ++i)
+        y[i] += info_(i, j).knownOffDiagonal()
+            * ConstDMap(x + offsets[keys_[j]],
+                offsets[keys_[j] + 1] - offsets[keys_[j]]);
+      // blocks on the diagonal are only half
+      y[i] += info_(j, j).selfadjointView()
+          * ConstDMap(x + offsets[keys_[j]],
+              offsets[keys_[j] + 1] - offsets[keys_[j]]);
+      // for below diagonal, we take transpose block from upper triangular part
+      for (i = j + 1; i < (DenseIndex) size(); ++i)
+        y[i] += info_(i, j).knownOffDiagonal()
+            * ConstDMap(x + offsets[keys_[j]],
+                offsets[keys_[j] + 1] - offsets[keys_[j]]);
+    }
+
+    // copy to yvalues
+    for (DenseIndex i = 0; i < (DenseIndex) size(); ++i)
+      DMap(yvalues + offsets[keys_[i]], offsets[keys_[i] + 1] - offsets[keys_[i]]) +=
+          alpha * y[i];
+  }
+
+  /** Return the diagonal of the Hessian for this factor (raw memory version) */
+  virtual void hessianDiagonal(double* d) const {
+
+    // Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, D, 1> DVector;
+    typedef Eigen::Map<DVector> DMap;
+
+    // Loop over all variables in the factor
+    for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
+      Key j = keys_[pos];
+      // Get the diagonal block, and insert its diagonal
+      const Matrix& B = info_(pos, pos).selfadjointView();
+      DMap(d + D * j) += B.diagonal();
+    }
+  }
+
+  /** Raw memory access version of gradientAtZero
+   *  TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n */
+  virtual void gradientAtZero(double* d) const {
+
+    // Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, D, 1> DVector;
+    typedef Eigen::Map<DVector> DMap;
+
+    // Loop over all variables in the factor
+    for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
+      Key j = keys_[pos];
+      // Get the linear term (last column of the augmented info matrix) for this variable
+      DVector dj = -info_(pos, size()).knownOffDiagonal();
+      DMap(d + D * j) += dj;
+    }
+  }
+
 };

 }
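The offsets overload above does not assume a uniform dimension: offsets[j] .. offsets[j+1] spans variable j inside the flat x and yvalues buffers, with one trailing entry past the last key (the same convention as accumulatedDims in RegularJacobianFactor below). A hedged usage sketch, where `factor` stands for any factor exposing this overload, over keys 0 and 1 with assumed dimensions 3 and 2:

    // Hypothetical dimensions {3, 2} -> offsets {0, 3, 5}
    std::vector<size_t> offsets;
    offsets.push_back(0); offsets.push_back(3); offsets.push_back(5);
    std::vector<double> x(5, 1.0), y(5, 0.0);
    factor.multiplyHessianAdd(2.0, x.data(), y.data(), offsets);  // y += 2 * A'A*x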
@@ -159,7 +159,7 @@ public:
    * @brief add the contribution of this factor to the diagonal of the hessian
    * d(output) = d(input) + deltaHessianFactor
    */
-  void hessianDiagonal(double* d) const {
+  virtual void hessianDiagonal(double* d) const {
     // diag(Hessian) = diag(F' * (I - E * PointCov * E') * F);
     // Use eigen magic to access raw memory
     typedef Eigen::Matrix<double, D, 1> DVector;
@@ -434,7 +434,7 @@ public:
   /**
    * Calculate gradient, which is -F'Q*b, see paper - RAW MEMORY ACCESS
    */
-  void gradientAtZero(double* d) const {
+  virtual void gradientAtZero(double* d) const {

     // Use eigen magic to access raw memory
     typedef Eigen::Matrix<double, D, 1> DVector;
@@ -0,0 +1,174 @@ new file: gtsam/slam/RegularJacobianFactor.h
+/* ----------------------------------------------------------------------------
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+ * See LICENSE for the license information
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file RegularJacobianFactor.h
+ * @brief JacobianFactor class with fixed-size blocks
+ * @author Sungtae An
+ * @date Nov 11, 2014
+ */
+
+#pragma once
+
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/VectorValues.h>
+#include <boost/foreach.hpp>
+#include <vector>
+
+namespace gtsam {
+
+template<size_t D>
+class RegularJacobianFactor: public JacobianFactor {
+
+private:
+
+  /** Use eigen magic to access raw memory */
+  typedef Eigen::Matrix<double, D, 1> DVector;
+  typedef Eigen::Map<DVector> DMap;
+  typedef Eigen::Map<const DVector> ConstDMap;
+
+public:
+
+  /** Construct an n-ary factor
+   * @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
+   *         collection of keys and matrices making up the factor. */
+  template<typename TERMS>
+  RegularJacobianFactor(const TERMS& terms, const Vector& b,
+      const SharedDiagonal& model = SharedDiagonal()) :
+      JacobianFactor(terms, b, model) {
+  }
+
+  /** Constructor with arbitrary number of keys, and where the augmented matrix is given all together
+   *  instead of in block terms. Note that only the active view of the provided augmented matrix
+   *  is used, and that the matrix data is copied into a newly-allocated matrix in the constructed
+   *  factor. */
+  template<typename KEYS>
+  RegularJacobianFactor(const KEYS& keys,
+      const VerticalBlockMatrix& augmentedMatrix,
+      const SharedDiagonal& sigmas = SharedDiagonal()) :
+      JacobianFactor(keys, augmentedMatrix, sigmas) {
+  }
+
+  /** Raw memory access version of multiplyHessianAdd y += alpha * A'*A*x
+   *  Note: this is not assuming a fixed dimension for the variables,
+   *  but requires the vector accumulatedDims to tell the dimension of
+   *  each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
+   *  then accumulatedDims is [0 3 9 11 13]
+   *  NOTE: size of accumulatedDims is size of keys + 1!! */
+  void multiplyHessianAdd(double alpha, const double* x, double* y,
+      const std::vector<size_t>& accumulatedDims) const {
+
+    /// Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, Eigen::Dynamic, 1> VectorD;
+    typedef Eigen::Map<VectorD> MapD;
+    typedef Eigen::Map<const VectorD> ConstMapD;
+
+    if (empty())
+      return;
+    Vector Ax = zero(Ab_.rows());
+
+    /// Just iterate over all A matrices and multiply in correct config part (looping over keys)
+    /// E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
+    /// Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
+    for (size_t pos = 0; pos < size(); ++pos)
+    {
+      Ax += Ab_(pos)
+          * ConstMapD(x + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]);
+    }
+    /// Deal with noise properly: need to whiten twice as we are dividing by variance
+    if (model_) {
+      model_->whitenInPlace(Ax);
+      model_->whitenInPlace(Ax);
+    }
+
+    /// multiply with alpha
+    Ax *= alpha;
+
+    /// Again iterate over all A matrices and insert Ai^T into y
+    for (size_t pos = 0; pos < size(); ++pos){
+      MapD(y + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]) += Ab_(
+          pos).transpose() * Ax;
+    }
+  }
+
+  /** Raw memory access version of multiplyHessianAdd */
+  void multiplyHessianAdd(double alpha, const double* x, double* y) const {
+
+    if (empty()) return;
+    Vector Ax = zero(Ab_.rows());
+
+    // Just iterate over all A matrices and multiply in correct config part
+    for(size_t pos=0; pos<size(); ++pos)
+      Ax += Ab_(pos) * ConstDMap(x + D * keys_[pos]);
+
+    // Deal with noise properly: need to whiten twice as we are dividing by variance
+    if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
+
+    // multiply with alpha
+    Ax *= alpha;
+
+    // Again iterate over all A matrices and insert Ai^T into y
+    for(size_t pos=0; pos<size(); ++pos)
+      DMap(y + D * keys_[pos]) += Ab_(pos).transpose() * Ax;
+  }
+
+  /** Raw memory access version of hessianDiagonal
+   *  TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+   */
+  virtual void hessianDiagonal(double* d) const {
+    // Loop over all variables in the factor
+    for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
+      // Get the diagonal block, and insert its diagonal
+      DVector dj;
+      for (size_t k = 0; k < D; ++k){
+        if (model_){
+          Vector column_k = Ab_(j).col(k);
+          column_k = model_->whiten(column_k);
+          dj(k) = dot(column_k, column_k);
+        }else{
+          dj(k) = Ab_(j).col(k).squaredNorm();
+        }
+      }
+      DMap(d + D * j) += dj;
+    }
+  }
+
+  /** Raw memory access version of gradientAtZero
+   *  TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+   */
+  virtual void gradientAtZero(double* d) const {
+
+    // Get vector b not weighted
+    Vector b = getb();
+
+    // Whitening b
+    if (model_) {
+      b = model_->whiten(b);
+      b = model_->whiten(b);
+    }
+
+    // Just iterate over all A matrices
+    for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
+      DVector dj;
+      // gradient -= A'*b/sigma^2
+      // Computing with each column
+      for (size_t k = 0; k < D; ++k){
+        Vector column_k = Ab_(j).col(k);
+        dj(k) = -1.0*dot(b, column_k);
+      }
+      DMap(d + D * j) += dj;
+    }
+  }
+
+};
+
+}
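The accumulatedDims convention documented above is an exclusive prefix sum of the per-key dimensions, with one extra trailing entry (so its size is the number of keys + 1). A sketch reproducing the [0 3 9 11 13] example from the doc comment, assuming four variables with dimensions 3, 6, 2, 2 and keys 0..3:

    std::vector<size_t> dims;  // per-key dimensions, assumed known to the caller
    dims.push_back(3); dims.push_back(6); dims.push_back(2); dims.push_back(2);
    std::vector<size_t> accumulatedDims(dims.size() + 1, 0);
    for (size_t i = 0; i < dims.size(); ++i)
      accumulatedDims[i + 1] = accumulatedDims[i] + dims[i];
    // accumulatedDims == {0, 3, 9, 11, 13}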
@@ -0,0 +1,100 @@ new file: testRegularHessianFactor.cpp
+/* ----------------------------------------------------------------------------
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+ * See LICENSE for the license information
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file testRegularHessianFactor.cpp
+ * @author Frank Dellaert
+ * @date March 4, 2014
+ */
+
+#include <gtsam/linear/VectorValues.h>
+#include <CppUnitLite/TestHarness.h>
+
+//#include <gtsam_unstable/slam/RegularHessianFactor.h>
+#include <gtsam/slam/RegularHessianFactor.h>
+
+#include <boost/assign/std/vector.hpp>
+#include <boost/assign/std/map.hpp>
+#include <boost/assign/list_of.hpp>
+
+using namespace std;
+using namespace gtsam;
+using namespace boost::assign;
+
+const double tol = 1e-5;
+
+/* ************************************************************************* */
+TEST(RegularHessianFactor, ConstructorNWay)
+{
+  Matrix G11 = (Matrix(2,2) << 111, 112, 113, 114).finished();
+  Matrix G12 = (Matrix(2,2) << 121, 122, 123, 124).finished();
+  Matrix G13 = (Matrix(2,2) << 131, 132, 133, 134).finished();
+
+  Matrix G22 = (Matrix(2,2) << 221, 222, 222, 224).finished();
+  Matrix G23 = (Matrix(2,2) << 231, 232, 233, 234).finished();
+
+  Matrix G33 = (Matrix(2,2) << 331, 332, 332, 334).finished();
+
+  Vector g1 = (Vector(2) << -7, -9).finished();
+  Vector g2 = (Vector(2) << -9, 1).finished();
+  Vector g3 = (Vector(2) << 2, 3).finished();
+
+  double f = 10;
+
+  std::vector<Key> js;
+  js.push_back(0); js.push_back(1); js.push_back(3);
+  std::vector<Matrix> Gs;
+  Gs.push_back(G11); Gs.push_back(G12); Gs.push_back(G13); Gs.push_back(G22); Gs.push_back(G23); Gs.push_back(G33);
+  std::vector<Vector> gs;
+  gs.push_back(g1); gs.push_back(g2); gs.push_back(g3);
+  RegularHessianFactor<2> factor(js, Gs, gs, f);
+
+  // multiplyHessianAdd:
+  {
+    // brute force
+    Matrix AtA = factor.information();
+    HessianFactor::const_iterator i1 = factor.begin();
+    HessianFactor::const_iterator i2 = i1 + 1;
+    Vector X(6); X << 1,2,3,4,5,6;
+    Vector Y(6); Y << 2633, 2674, 4465, 4501, 5669, 5696;
+    EXPECT(assert_equal(Y,AtA*X));
+
+    VectorValues x = map_list_of<Key, Vector>
+      (0, (Vector(2) << 1,2).finished())
+      (1, (Vector(2) << 3,4).finished())
+      (3, (Vector(2) << 5,6).finished());
+
+    VectorValues expected;
+    expected.insert(0, Y.segment<2>(0));
+    expected.insert(1, Y.segment<2>(2));
+    expected.insert(3, Y.segment<2>(4));
+
+    // RAW ACCESS
+    Vector expected_y(8); expected_y << 2633, 2674, 4465, 4501, 0, 0, 5669, 5696;
+    Vector fast_y = gtsam::zero(8);
+    double xvalues[8] = {1,2,3,4,0,0,5,6};
+    factor.multiplyHessianAdd(1, xvalues, fast_y.data());
+    EXPECT(assert_equal(expected_y, fast_y));
+
+    // now, do it with non-zero y
+    factor.multiplyHessianAdd(1, xvalues, fast_y.data());
+    EXPECT(assert_equal(2*expected_y, fast_y));
+
+    // check some expressions
+    EXPECT(assert_equal(G12,factor.info(i1,i2).knownOffDiagonal()));
+    EXPECT(assert_equal(G22,factor.info(i2,i2).selfadjointView()));
+    EXPECT(assert_equal((Matrix)G12.transpose(),factor.info(i2,i1).knownOffDiagonal()));
+  }
+}
+
+/* ************************************************************************* */
+int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+/* ************************************************************************* */
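One detail worth flagging in the RAW ACCESS block above: the factor involves keys {0, 1, 3} with D = 2, so the flat buffers are sized for keys 0..3 (8 doubles) and the slot belonging to the absent key 2 (entries 4-5) is simply left at zero, which is why both xvalues and expected_y carry zeros there.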
@@ -0,0 +1,224 @@ new file: testRegularJacobianFactor.cpp
+/**
+ * @file testRegularJacobianFactor.cpp
+ * @brief Unit tests for RegularJacobianFactor
+ * @author Sungtae An
+ * @date Nov 12, 2014
+ */
+
+#include <gtsam/base/TestableAssertions.h>
+#include <CppUnitLite/TestHarness.h>
+
+#include <gtsam/slam/RegularJacobianFactor.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/GaussianConditional.h>
+#include <gtsam/linear/VectorValues.h>
+
+#include <boost/assign/std/vector.hpp>
+#include <boost/assign/list_of.hpp>
+#include <boost/range/iterator_range.hpp>
+#include <boost/range/adaptor/map.hpp>
+
+using namespace std;
+using namespace gtsam;
+using namespace boost::assign;
+
+static const size_t fixedDim = 3;
+static const size_t nrKeys = 3;
+
+// Keys are assumed to be from 0 to n
+namespace {
+namespace simple {
+// Terms we'll use
+const vector<pair<Key, Matrix> > terms = list_of<pair<Key,Matrix> >
+    (make_pair(0, Matrix3::Identity()))
+    (make_pair(1, 2*Matrix3::Identity()))
+    (make_pair(2, 3*Matrix3::Identity()));
+
+// RHS and sigmas
+const Vector b = (Vector(3) << 1., 2., 3.).finished();
+const SharedDiagonal noise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.5,0.5,0.5).finished());
+}
+
+namespace simple2 {
+// Terms
+const vector<pair<Key, Matrix> > terms2 = list_of<pair<Key,Matrix> >
+    (make_pair(0, 2*Matrix3::Identity()))
+    (make_pair(1, 4*Matrix3::Identity()))
+    (make_pair(2, 6*Matrix3::Identity()));
+
+// RHS
+const Vector b2 = (Vector(3) << 2., 4., 6.).finished();
+}
+}
+
+/* ************************************************************************* */
+// Convert from double* to VectorValues
+VectorValues double2vv(const double* x,
+    const size_t nrKeys, const size_t dim) {
+  // create map with dimensions
+  std::map<gtsam::Key, size_t> dims;
+  for (size_t i = 0; i < nrKeys; i++)
+    dims.insert(make_pair(i, dim));
+
+  size_t n = nrKeys*dim;
+  Vector xVec(n);
+  for (size_t i = 0; i < n; i++){
+    xVec(i) = x[i];
+  }
+  return VectorValues(xVec, dims);
+}
+
+/* ************************************************************************* */
+// Convert from VectorValues to double*
+void vv2double(const VectorValues& vv, double* y,
+    const size_t nrKeys, const size_t dim) {
+  // create map with dimensions
+  std::map<gtsam::Key, size_t> dims;
+  for (size_t i = 0; i < nrKeys; i++)
+    dims.insert(make_pair(i, dim));
+
+  Vector yvector = vv.vector(dims);
+  size_t n = nrKeys*dim;
+  for (size_t j = 0; j < n; j++)
+    y[j] = yvector[j];
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobianFactor, constructorNway)
+{
+  using namespace simple;
+  JacobianFactor factor(terms[0].first, terms[0].second,
+      terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+  LONGS_EQUAL((long)terms[2].first, (long)regularFactor.keys().back());
+  EXPECT(assert_equal(terms[2].second, regularFactor.getA(regularFactor.end() - 1)));
+  EXPECT(assert_equal(b, factor.getb()));
+  EXPECT(assert_equal(b, regularFactor.getb()));
+  EXPECT(noise == factor.get_model());
+  EXPECT(noise == regularFactor.get_model());
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobianFactor, hessianDiagonal)
+{
+  using namespace simple;
+  JacobianFactor factor(terms[0].first, terms[0].second,
+      terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+  // we compute hessian diagonal from the standard Jacobian
+  VectorValues expectedHessianDiagonal = factor.hessianDiagonal();
+
+  // we compare against the raw memory access implementation of hessianDiagonal
+  double actualValue[9]={0};
+  regularFactor.hessianDiagonal(actualValue);
+  VectorValues actualHessianDiagonalRaw = double2vv(actualValue,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedHessianDiagonal, actualHessianDiagonalRaw));
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobian, gradientAtZero)
+{
+  using namespace simple;
+  JacobianFactor factor(terms[0].first, terms[0].second,
+      terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+  // we compute gradient at zero from the standard Jacobian
+  VectorValues expectedGradientAtZero = factor.gradientAtZero();
+
+  //EXPECT(assert_equal(expectedGradientAtZero, regularFactor.gradientAtZero()));
+
+  // we compare against the raw memory access implementation of gradientAtZero
+  double actualValue[9]={0};
+  regularFactor.gradientAtZero(actualValue);
+  VectorValues actualGradientAtZeroRaw = double2vv(actualValue,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedGradientAtZero, actualGradientAtZeroRaw));
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobian, gradientAtZero_multiFactors)
+{
+  using namespace simple;
+  JacobianFactor factor(terms[0].first, terms[0].second,
+      terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+  // we compute gradient at zero from the standard Jacobian
+  VectorValues expectedGradientAtZero = factor.gradientAtZero();
+
+  // we compare against the raw memory access implementation of gradientAtZero
+  double actualValue[9]={0};
+  regularFactor.gradientAtZero(actualValue);
+  VectorValues actualGradientAtZeroRaw = double2vv(actualValue,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedGradientAtZero, actualGradientAtZeroRaw));
+
+  // One more factor
+  using namespace simple2;
+  JacobianFactor factor2(terms2[0].first, terms2[0].second,
+      terms2[1].first, terms2[1].second, terms2[2].first, terms2[2].second, b2, noise);
+  RegularJacobianFactor<fixedDim> regularFactor2(terms2, b2, noise);
+
+  // we accumulate computed gradient at zero from the standard Jacobian
+  VectorValues expectedGradientAtZero2 = expectedGradientAtZero.add(factor2.gradientAtZero());
+
+  // we compare against the raw memory access implementation of gradientAtZero
+  regularFactor2.gradientAtZero(actualValue);
+  VectorValues actualGradientAtZeroRaw2 = double2vv(actualValue,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedGradientAtZero2, actualGradientAtZeroRaw2));
+}
+
+/* ************************************************************************* */
+TEST(RegularJacobian, multiplyHessianAdd)
+{
+  using namespace simple;
+  JacobianFactor factor(terms[0].first, terms[0].second,
+      terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+
+  // arbitrary vector X
+  VectorValues X;
+  X.insert(0, (Vector(3) << 10.,20.,30.).finished());
+  X.insert(1, (Vector(3) << 10.,20.,30.).finished());
+  X.insert(2, (Vector(3) << 10.,20.,30.).finished());
+
+  // arbitrary vector Y
+  VectorValues Y;
+  Y.insert(0, (Vector(3) << 10.,10.,10.).finished());
+  Y.insert(1, (Vector(3) << 20.,20.,20.).finished());
+  Y.insert(2, (Vector(3) << 30.,30.,30.).finished());
+
+  // multiplyHessianAdd Y += alpha*A'A*X
+  double alpha = 2.0;
+  VectorValues expectedMHA = Y;
+  factor.multiplyHessianAdd(alpha, X, expectedMHA);
+
+  // create data for raw memory access
+  double XRaw[9];
+  vv2double(X, XRaw, nrKeys, fixedDim);
+
+  // test 1st version: multiplyHessianAdd(double alpha, const double* x, double* y)
+  double actualMHARaw[9];
+  vv2double(Y, actualMHARaw, nrKeys, fixedDim);
+  regularFactor.multiplyHessianAdd(alpha, XRaw, actualMHARaw);
+  VectorValues actualMHARawVV = double2vv(actualMHARaw,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedMHA,actualMHARawVV));
+
+  // test 2nd version: multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys)
+  double actualMHARaw2[9];
+  vv2double(Y, actualMHARaw2, nrKeys, fixedDim);
+  vector<size_t> dims;
+  size_t accumulatedDim = 0;
+  for (size_t i = 0; i < nrKeys+1; i++){
+    dims.push_back(accumulatedDim);
+    accumulatedDim += fixedDim;
+  }
+  regularFactor.multiplyHessianAdd(alpha, XRaw, actualMHARaw2, dims);
+  VectorValues actualMHARawVV2 = double2vv(actualMHARaw2,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedMHA,actualMHARawVV2));
+}
+
+/* ************************************************************************* */
+int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+/* ************************************************************************* */
@@ -87,19 +87,19 @@ TEST(PCGSolver, simpleLinearSystem) {
   simpleGFG += JacobianFactor(0, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2);
   simpleGFG += JacobianFactor(1, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2);
   simpleGFG += JacobianFactor(2, (Matrix(2,2)<< 1, 0, 0, 1).finished(), (Vector(2) << 0, 0).finished(), unit2);
-  simpleGFG.print("system");
+  //simpleGFG.print("system");

   // Expected solution
   VectorValues expectedSolution;
   expectedSolution.insert(0, (Vector(2) << 0.100498, -0.196756).finished());
   expectedSolution.insert(2, (Vector(2) << -0.0990413, -0.0980577).finished());
   expectedSolution.insert(1, (Vector(2) << -0.0973252, 0.100582).finished());
-  expectedSolution.print("Expected");
+  //expectedSolution.print("Expected");

   // Solve the system using direct method
   VectorValues deltaDirect = simpleGFG.optimize();
   EXPECT(assert_equal(expectedSolution, deltaDirect, 1e-5));
-  deltaDirect.print("Direct");
+  //deltaDirect.print("Direct");

   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
@@ -107,19 +107,19 @@ TEST(PCGSolver, simpleLinearSystem) {
   pcg->setMaxIterations(500);
   pcg->setEpsilon_abs(0.0);
   pcg->setEpsilon_rel(0.0);
-  pcg->setVerbosity("ERROR");
+  //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
   pcg->preconditioner_ = boost::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
-  deltaPCGDummy.print("PCG Dummy");
+  //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
   pcg->preconditioner_ = boost::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
-  deltaPCGJacobi.print("PCG Jacobi");
+  //deltaPCGJacobi.print("PCG Jacobi");
 }