fixed unit test with Luca
parent 03ebcb6185
commit c6faa784e2
@@ -56,8 +56,13 @@ public:
     JacobianFactor::multiplyHessianAdd(alpha, x, y);
   }
 
+  // Note: this is not assuming a fixed dimension for the variables,
+  // but requires the vector accumulatedDims to tell the dimension of
+  // each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
+  // then accumulatedDims is [0 3 9 11 13]
+  // NOTE: size of accumulatedDims is size of keys + 1!!
   void multiplyHessianAdd(double alpha, const double* x, double* y,
-      std::vector<size_t> keys) const {
+      const std::vector<size_t>& accumulatedDims) const {
 
     // Use eigen magic to access raw memory
     typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
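The accumulatedDims convention documented above is a running prefix sum of the per-variable dimensions, with one extra trailing entry equal to the total length of the raw vector. A minimal sketch of how such a vector could be built (the helper name is illustrative, not part of this commit):

    #include <cstddef>
    #include <vector>

    // Hypothetical helper: per-variable dims, e.g. {3, 6, 2, 2}, become
    // accumulated offsets {0, 3, 9, 11, 13} (size = number of keys + 1).
    std::vector<size_t> buildAccumulatedDims(const std::vector<size_t>& dims) {
      std::vector<size_t> accumulatedDims(1, 0);
      for (size_t i = 0; i < dims.size(); ++i)
        accumulatedDims.push_back(accumulatedDims.back() + dims[i]);
      return accumulatedDims;
    }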
@@ -68,13 +73,13 @@ public:
       return;
     Vector Ax = zero(Ab_.rows());
 
-    // Just iterate over all A matrices and multiply in correct config part
+    // Just iterate over all A matrices and multiply in correct config part (looping over keys)
+    // E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
+    // Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
     for (size_t pos = 0; pos < size(); ++pos)
     {
-      std::cout << "pos: " << pos << " keys_[pos]: " << keys_[pos] << " keys[keys_[pos]]: " << keys[keys_[pos]] << std::endl;
       Ax += Ab_(pos)
-          * ConstDMap(x + keys[keys_[pos]],
-              keys[keys_[pos] + 1] - keys[keys_[pos]]);
+          * ConstDMap(x + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]);
     }
     // Deal with noise properly, need to Double* whiten as we are dividing by variance
     if (model_) {
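ConstDMap above is presumably an Eigen::Map typedef over const raw memory; the accumulatedDims offsets let it view each variable's segment of the x buffer in place, with no copy. A small standalone sketch of the same idiom (names and dimensions are illustrative only):

    #include <Eigen/Dense>
    #include <cstddef>
    #include <vector>

    int main() {
      // Raw buffer holding two variables: x0 (dim 3) and x1 (dim 6).
      std::vector<double> x(9, 1.0);
      std::vector<size_t> accumulatedDims;
      accumulatedDims.push_back(0);
      accumulatedDims.push_back(3);
      accumulatedDims.push_back(9);

      const size_t key = 1;  // view the second variable, no copy made
      Eigen::Map<const Eigen::VectorXd> xk(
          x.data() + accumulatedDims[key],
          accumulatedDims[key + 1] - accumulatedDims[key]);
      return xk.size() == 6 ? 0 : 1;
    }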
@@ -85,10 +90,11 @@ public:
     // multiply with alpha
     Ax *= alpha;
 
-    // Again iterate over all A matrices and insert Ai^e into y
-    for (size_t pos = 0; pos < size(); ++pos)
-      DMap(y + keys[keys_[pos]], keys[keys_[pos] + 1] - keys[keys_[pos]]) += Ab_(
+    // Again iterate over all A matrices and insert Ai^T into y
+    for (size_t pos = 0; pos < size(); ++pos){
+      DMap(y + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]) += Ab_(
           pos).transpose() * Ax;
+    }
   }
 
   void multiplyHessianAdd(double alpha, const double* x, double* y) const {
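Taken together, the two loops implement y += alpha * A' * A * x with the rows whitened by the noise model (applied twice, i.e. dividing by the variance, per the comment above). A dense-matrix reference of the same operation that ignores the block structure, written here as a sketch with a diagonal noise model rather than the library implementation:

    #include <Eigen/Dense>

    // y += alpha * A^T * Sigma^{-1} * A * x, with Sigma = diag(sigmas^2).
    void multiplyHessianAddDense(double alpha,
                                 const Eigen::MatrixXd& A,
                                 const Eigen::VectorXd& sigmas,
                                 const Eigen::VectorXd& x,
                                 Eigen::VectorXd& y) {
      Eigen::VectorXd Ax = A * x;                          // first loop: accumulate A_i * x_i
      Ax = Ax.cwiseQuotient(sigmas.cwiseProduct(sigmas));  // "double whiten": divide by variance
      Ax *= alpha;
      y += A.transpose() * Ax;                             // second loop: scatter A_i^T * Ax into y
    }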
@@ -123,10 +129,9 @@ public:
 
   /// Raw memory access version of hessianDiagonal
   /* ************************************************************************* */
-  // TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
+  // TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
   void hessianDiagonal(double* d) const {
     // Use eigen magic to access raw memory
-    //typedef Eigen::Matrix<double, 9, 1> DVector;
     typedef Eigen::Matrix<double, D, 1> DVector;
     typedef Eigen::Map<DVector> DMap;
 
@@ -134,11 +139,15 @@ public:
     for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
       // Get the diagonal block, and insert its diagonal
       DVector dj;
-      //for (size_t k = 0; k < 9; ++k)
-      for (size_t k = 0; k < D; ++k)
-        dj(k) = Ab_(j).col(k).squaredNorm();
-
-      //DMap(d + 9 * j) += dj;
+      for (size_t k = 0; k < D; ++k){
+        if (model_){
+          Vector column_k = Ab_(j).col(k);
+          column_k = model_->whiten(column_k);
+          dj(k) = dot(column_k, column_k);
+        }else{
+          dj(k) = Ab_(j).col(k).squaredNorm();
+        }
+      }
+      DMap(d + D * j) += dj;
     }
   }
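A quick sanity check on what this computes: the block of the Hessian diagonal belonging to variable j is the vector of squared column norms of A_j (of the whitened A_j when a noise model is present), since (A'A)_kk = ||column k||^2. A tiny dense illustration of that identity:

    #include <Eigen/Dense>
    #include <cassert>

    int main() {
      Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 3);
      Eigen::VectorXd diag1 = (A.transpose() * A).diagonal();
      Eigen::VectorXd diag2(3);
      for (int k = 0; k < 3; ++k)
        diag2(k) = A.col(k).squaredNorm();     // per-column squared norms
      assert((diag1 - diag2).norm() < 1e-12);  // identical up to round-off
      return 0;
    }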
@@ -22,103 +22,161 @@ using namespace std;
 using namespace gtsam;
 using namespace boost::assign;
 
+static const size_t fixedDim = 3;
+static const size_t nrKeys = 3;
+
+// Keys are assumed to be from 0 to n
 namespace {
   namespace simple {
     // Terms we'll use
     const vector<pair<Key, Matrix> > terms = list_of<pair<Key,Matrix> >
-        (make_pair(5, Matrix3::Identity()))
-        (make_pair(10, 2*Matrix3::Identity()))
-        (make_pair(15, 3*Matrix3::Identity()));
+        (make_pair(0, Matrix3::Identity()))
+        (make_pair(1, 2*Matrix3::Identity()))
+        (make_pair(2, 3*Matrix3::Identity()));
 
     // RHS and sigmas
     const Vector b = (Vector(3) << 1., 2., 3.);
-    const SharedDiagonal noise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.5, 0.5, 0.5));
+    const SharedDiagonal noise = noiseModel::Diagonal::Sigmas((Vector(3) << 0.5,0.5,0.5));
   }
 }
 
+/* ************************************************************************* */
+// Convert from double* to VectorValues
+VectorValues double2vv(const double* x,
+    const size_t nrKeys, const size_t dim) {
+  // create map with dimensions
+  std::map<gtsam::Key, size_t> dims;
+  for (size_t i = 0; i < nrKeys; i++)
+    dims.insert(make_pair(i, dim));
+
+  size_t n = nrKeys*dim;
+  Vector xVec(n);
+  for (size_t i = 0; i < n; i++){
+    xVec(i) = x[i];
+  }
+  return VectorValues(xVec, dims);
+}
+
+/* ************************************************************************* */
+void vv2double(const VectorValues& vv, double* y,
+    const size_t nrKeys, const size_t dim) {
+  // create map with dimensions
+  std::map<gtsam::Key, size_t> dims;
+  for (size_t i = 0; i < nrKeys; i++)
+    dims.insert(make_pair(i, dim));
+
+  Vector yvector = vv.vector(dims);
+  size_t n = nrKeys*dim;
+  for (size_t j = 0; j < n; j++)
+    y[j] = yvector[j];
+}
+
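The two helpers assume the flat layout used by the raw-memory interface: keys run from 0 to nrKeys-1 and key i occupies entries [i*dim, (i+1)*dim) of the array, so they are exact inverses of each other. A hedged fragment (not part of the commit) that could sit inside a test body to check the round trip, assuming <cassert> is included:

    // Round trip: vv2double followed by double2vv reproduces the raw input.
    double raw[9] = {10., 20., 30., 10., 20., 30., 10., 20., 30.};
    VectorValues asVV = double2vv(raw, nrKeys, fixedDim);  // 3 keys of dimension 3
    double back[9];
    vv2double(asVV, back, nrKeys, fixedDim);
    for (size_t i = 0; i < 9; i++)
      assert(raw[i] == back[i]);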
 /* ************************************************************************* */
 TEST(RegularJacobianFactor, constructorNway)
 {
   using namespace simple;
-  JacobianFactor expected(terms[0].first, terms[0].second,
+  JacobianFactor factor(terms[0].first, terms[0].second,
       terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
-  RegularJacobianFactor<3> actual(terms, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
 
-  LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back());
-  EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1)));
-  EXPECT(assert_equal(b, expected.getb()));
-  EXPECT(assert_equal(b, actual.getb()));
-  EXPECT(noise == expected.get_model());
-  EXPECT(noise == actual.get_model());
+  LONGS_EQUAL((long)terms[2].first, (long)regularFactor.keys().back());
+  EXPECT(assert_equal(terms[2].second, regularFactor.getA(regularFactor.end() - 1)));
+  EXPECT(assert_equal(b, factor.getb()));
+  EXPECT(assert_equal(b, regularFactor.getb()));
+  EXPECT(noise == factor.get_model());
+  EXPECT(noise == regularFactor.get_model());
 }
 
 TEST(RegularJacobianFactor, hessianDiagonal)
 {
   using namespace simple;
-  JacobianFactor expected(terms[0].first, terms[0].second,
+  JacobianFactor factor(terms[0].first, terms[0].second,
       terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
-  RegularJacobianFactor<3> actual(terms, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
 
-  EXPECT(assert_equal(expected.hessianDiagonal(),actual.hessianDiagonal()));
-  expected.hessianDiagonal().print();
-  actual.hessianDiagonal().print();
+  // we compute hessian diagonal from the standard Jacobian
+  VectorValues expectedHessianDiagonal = factor.hessianDiagonal();
+
+  // we compute hessian diagonal from the regular Jacobian, but still using the
+  // implementation of hessianDiagonal in the base class
+  VectorValues actualHessianDiagonal = regularFactor.hessianDiagonal();
+
+  EXPECT(assert_equal(expectedHessianDiagonal,actualHessianDiagonal));
+
+  // we compare against the Raw memory access implementation of hessianDiagonal
   double actualValue[9];
-  actual.hessianDiagonal(actualValue);
-  for(int i=0; i<9; ++i)
-    std::cout << actualValue[i] << std::endl;
 
+  // Why unwhitened?
+  regularFactor.hessianDiagonal(actualValue);
+  VectorValues actualHessianDiagonalRaw = double2vv(actualValue,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedHessianDiagonal,actualHessianDiagonalRaw));
 }
 
 /* ************************************************************************* */
 TEST(RegularJacobian, gradientAtZero)
 {
   using namespace simple;
-  JacobianFactor expected(terms[0].first, terms[0].second,
+  JacobianFactor factor(terms[0].first, terms[0].second,
       terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
-  RegularJacobianFactor<3> actual(terms, b, noise);
-  EXPECT(assert_equal(expected.gradientAtZero(),actual.gradientAtZero()));
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
+  EXPECT(assert_equal(factor.gradientAtZero(),regularFactor.gradientAtZero()));
 
-  // create and test raw memory access version
+  // raw memory access is not available now
 }
 
 /* ************************************************************************* */
 TEST(RegularJacobian, multiplyHessianAdd)
 {
   using namespace simple;
-  RegularJacobianFactor<3> factor(terms, b, noise);
+  JacobianFactor factor(terms[0].first, terms[0].second,
+      terms[1].first, terms[1].second, terms[2].first, terms[2].second, b, noise);
+  RegularJacobianFactor<fixedDim> regularFactor(terms, b, noise);
 
   // arbitrary vector X
   VectorValues X;
-  X.insert(5, (Vector(3) << 10.,20.,30.));
-  X.insert(10, (Vector(3) << 10.,20.,30.));
-  X.insert(15, (Vector(3) << 10.,20.,30.));
+  X.insert(0, (Vector(3) << 10.,20.,30.));
+  X.insert(1, (Vector(3) << 10.,20.,30.));
+  X.insert(2, (Vector(3) << 10.,20.,30.));
 
   // arbitrary vector Y
   VectorValues Y;
-  Y.insert(5, (Vector(3) << 10.,10.,10.));
-  Y.insert(10, (Vector(3) << 20.,20.,20.));
-  Y.insert(15, (Vector(3) << 30.,30.,30.));
+  Y.insert(0, (Vector(3) << 10.,10.,10.));
+  Y.insert(1, (Vector(3) << 20.,20.,20.));
+  Y.insert(2, (Vector(3) << 30.,30.,30.));
 
-  // muultiplyHessianAdd Y += alpha*A'A*X
-  factor.multiplyHessianAdd(2.0, X, Y);
+  // multiplyHessianAdd Y += alpha*A'A*X
+  double alpha = 2.0;
+  VectorValues expectedMHA = Y;
+  factor.multiplyHessianAdd(alpha, X, expectedMHA);
 
-  VectorValues expectedValues;
-  expectedValues.insert(5, (Vector(3) << 490.,970.,1450.));
-  expectedValues.insert(10, (Vector(3) << 980.,1940.,2900.));
-  expectedValues.insert(15, (Vector(3) << 1470.,2910.,4350.));
+  VectorValues actualMHA = Y;
+  regularFactor.multiplyHessianAdd(alpha, X, actualMHA);
 
-  EXPECT(assert_equal(expectedValues,Y));
+  EXPECT(assert_equal(expectedMHA, actualMHA));
 
-  //double dataX[9] = {10., 20., 30., 10., 20., 30., 10., 20., 30.};
-  //double dataY[9] = {10., 10., 10., 20., 20., 20., 30., 30., 30.};
+  // create data for raw memory access
+  double XRaw[9];
+  vv2double(X, XRaw, nrKeys, fixedDim);
 
-  std::cout << "size: " << factor.size() << std::endl;
-  double dataX[9] = {0.,};
-  double dataY[9] = {0.,};
-
-  std::vector<Key> ks;
-  ks.push_back(5);ks.push_back(10);ks.push_back(15);
-
-  // Raw memory access version
-  factor.multiplyHessianAdd(2.0, dataX, dataY, ks);
+  // test 1st version: multiplyHessianAdd(double alpha, const double* x, double* y)
+  double actualMHARaw[9];
+  vv2double(Y, actualMHARaw, nrKeys, fixedDim);
+  regularFactor.multiplyHessianAdd(alpha, XRaw, actualMHARaw);
+  VectorValues actualMHARawVV = double2vv(actualMHARaw,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedMHA,actualMHARawVV));
+
+  // test 2nd version: multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys)
+  double actualMHARaw2[9];
+  vv2double(Y, actualMHARaw2, nrKeys, fixedDim);
+  vector<size_t> dims;
+  size_t accumulatedDim = 0;
+  for (size_t i = 0; i < nrKeys+1; i++){
+    dims.push_back(accumulatedDim);
+    accumulatedDim += fixedDim;
+  }
+  regularFactor.multiplyHessianAdd(alpha, XRaw, actualMHARaw2, dims);
+  VectorValues actualMHARawVV2 = double2vv(actualMHARaw2,nrKeys,fixedDim);
+  EXPECT(assert_equal(expectedMHA,actualMHARawVV2));
 }
 
 /* ************************************************************************* */
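For reference, the hard-coded expectedValues removed above are consistent with this fixture (A = [I 2I 3I], all sigmas 0.5, alpha = 2, Y starting at [10 10 10]', [20 20 20]', [30 30 30]'), assuming the noise model whitens A*x by dividing by the variance sigma^2 = 0.25, as the "Double* whiten" comment in multiplyHessianAdd indicates:

    A*x                 = (1 + 2 + 3) * [10 20 30]' = [60 120 180]'
    whitened and scaled = 2 * [60 120 180]' / 0.25  = [480 960 1440]'
    y0 += 1 * [480 960 1440]'  ->  [490 970 1450]'
    y1 += 2 * [480 960 1440]'  ->  [980 1940 2900]'
    y2 += 3 * [480 960 1440]'  ->  [1470 2910 4350]'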