Fix Matrix_(...) to Mat() <<... in gtsam/linear

release/4.3a0
Jing Dong 2013-11-13 05:27:20 +00:00
parent 763083d2db
commit c08a0ebcae
10 changed files with 135 additions and 133 deletions
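This commit migrates the gtsam/linear unit tests from the old variadic Matrix_(rows, cols, entries...) helper to Eigen's comma-initializer style, written through the Mat(rows, cols) and Vec(n) shorthands used in the tests. Below is a minimal sketch of the two styles using plain Eigen types; the Mat/Vec aliases and the .finished() calls are assumptions made to keep the example self-contained, not the exact definitions in the GTSAM headers.

#include <Eigen/Dense>

// Hypothetical aliases for this sketch only; the GTSAM tests pick up their
// Mat/Vec shorthands from the library's own headers.
typedef Eigen::MatrixXd Mat;
typedef Eigen::VectorXd Vec;

int main() {
  // Old style (removed by this commit): dimensions followed by the entries
  // as a flat variadic argument list, e.g.
  //   Matrix R1 = Matrix_(2, 2, 1.0, 1.0, 0.0, 1.0);

  // New style (introduced by this commit): size the matrix, then stream the
  // entries in with Eigen's comma initializer. Plain Eigen needs .finished()
  // to produce the concrete matrix; the GTSAM tests write the bare
  // expression, presumably relying on a conversion supplied by their
  // shorthands or the Eigen version in use.
  Mat R1 = (Mat(2, 2) << 1.0, 1.0,
                         0.0, 1.0).finished();
  Vec d  = (Vec(2) << 9.0, 5.0).finished();
  return 0;
}

A likely motivation, not stated in the commit message, is that the variadic form read every entry through varargs as a double, so an accidental integer literal was silently misread, whereas the comma initializer is type-checked and asserts on a wrong entry count.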

View File

@@ -38,8 +38,8 @@ using namespace gtsam;
static const Key _x_=0, _y_=1, _z_=2;
static GaussianBayesNet smallBayesNet = list_of
(GaussianConditional(_x_, (Vec(1) << 9.0), Matrix_(1, 1, 1.0), _y_, Matrix_(1, 1, 1.0)))
(GaussianConditional(_y_, (Vec(1) << 5.0), Matrix_(1, 1, 1.0)));
(GaussianConditional(_x_, (Vec(1) << 9.0), (Mat(1, 1) << 1.0), _y_, (Mat(1, 1) << 1.0)))
(GaussianConditional(_y_, (Vec(1) << 5.0), (Mat(1, 1) << 1.0)));
/* ************************************************************************* */
TEST( GaussianBayesNet, matrix )
@@ -47,7 +47,7 @@ TEST( GaussianBayesNet, matrix )
Matrix R; Vector d;
boost::tie(R,d) = smallBayesNet.matrix(); // find matrix and RHS
Matrix R1 = Matrix_(2,2,
Matrix R1 = (Mat(2, 2) <<
1.0, 1.0,
0.0, 1.0
);
@@ -113,15 +113,15 @@ TEST( GaussianBayesNet, DeterminantTest )
{
GaussianBayesNet cbn;
cbn += GaussianConditional(
0, (Vec(2) << 3.0, 4.0 ), Matrix_(2, 2, 1.0, 3.0, 0.0, 4.0 ),
1, Matrix_(2, 2, 2.0, 1.0, 2.0, 3.0), noiseModel::Isotropic::Sigma(2, 2.0));
0, (Vec(2) << 3.0, 4.0 ), (Mat(2, 2) << 1.0, 3.0, 0.0, 4.0 ),
1, (Mat(2, 2) << 2.0, 1.0, 2.0, 3.0), noiseModel::Isotropic::Sigma(2, 2.0));
cbn += GaussianConditional(
1, (Vec(2) << 5.0, 6.0 ), Matrix_(2, 2, 1.0, 1.0, 0.0, 3.0 ),
2, Matrix_(2, 2, 1.0, 0.0, 5.0, 2.0), noiseModel::Isotropic::Sigma(2, 2.0));
1, (Vec(2) << 5.0, 6.0 ), (Mat(2, 2) << 1.0, 1.0, 0.0, 3.0 ),
2, (Mat(2, 2) << 1.0, 0.0, 5.0, 2.0), noiseModel::Isotropic::Sigma(2, 2.0));
cbn += GaussianConditional(
3, (Vec(2) << 7.0, 8.0 ), Matrix_(2, 2, 1.0, 1.0, 0.0, 5.0 ), noiseModel::Isotropic::Sigma(2, 2.0));
3, (Vec(2) << 7.0, 8.0 ), (Mat(2, 2) << 1.0, 1.0, 0.0, 5.0 ), noiseModel::Isotropic::Sigma(2, 2.0));
double expectedDeterminant = 60.0 / 64.0;
double actualDeterminant = cbn.determinant();
@@ -144,21 +144,21 @@ TEST(GaussianBayesNet, ComputeSteepestDescentPoint) {
// Create an arbitrary Bayes Net
GaussianBayesNet gbn;
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
0, (Vec(2) << 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
4, Matrix_(2,2, 11.0,12.0,13.0,14.0)));
0, (Vec(2) << 1.0,2.0), (Mat(2, 2) << 3.0,4.0,0.0,6.0),
3, (Mat(2, 2) << 7.0,8.0,9.0,10.0),
4, (Mat(2, 2) << 11.0,12.0,13.0,14.0)));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
1, (Vec(2) << 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
4, Matrix_(2,2, 25.0,26.0,27.0,28.0)));
1, (Vec(2) << 15.0,16.0), (Mat(2, 2) << 17.0,18.0,0.0,20.0),
2, (Mat(2, 2) << 21.0,22.0,23.0,24.0),
4, (Mat(2, 2) << 25.0,26.0,27.0,28.0)));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
2, (Vec(2) << 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
3, Matrix_(2,2, 35.0,36.0,37.0,38.0)));
2, (Vec(2) << 29.0,30.0), (Mat(2, 2) << 31.0,32.0,0.0,34.0),
3, (Mat(2, 2) << 35.0,36.0,37.0,38.0)));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
3, (Vec(2) << 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
4, Matrix_(2,2, 45.0,46.0,47.0,48.0)));
3, (Vec(2) << 39.0,40.0), (Mat(2, 2) << 41.0,42.0,0.0,44.0),
4, (Mat(2, 2) << 45.0,46.0,47.0,48.0)));
gbn += GaussianConditional::shared_ptr(new GaussianConditional(
4, (Vec(2) << 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0)));
4, (Vec(2) << 49.0,50.0), (Mat(2, 2) << 51.0,52.0,0.0,54.0)));
// Compute the Hessian numerically
Matrix hessian = numericalHessian(

View File

@@ -38,10 +38,10 @@ namespace {
const Key x1=1, x2=2, x3=3, x4=4;
const SharedDiagonal chainNoise = noiseModel::Isotropic::Sigma(1, 0.5);
const GaussianFactorGraph chain = list_of
(JacobianFactor(x2, Matrix_(1,1,1.), x1, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x2, Matrix_(1,1,1.), x3, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x3, Matrix_(1,1,1.), x4, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x4, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise));
(JacobianFactor(x2, (Mat(1, 1) << 1.), x1, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x2, (Mat(1, 1) << 1.), x3, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x3, (Mat(1, 1) << 1.), x4, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x4, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise));
const Ordering chainOrdering = Ordering(list_of(x2)(x1)(x3)(x4));
/* ************************************************************************* */
@@ -84,13 +84,13 @@ TEST( GaussianBayesTree, eliminate )
{
GaussianBayesTree bt = *chain.eliminateMultifrontal(chainOrdering);
Matrix two = Matrix_(1,1,2.);
Matrix one = Matrix_(1,1,1.);
Matrix two = (Mat(1, 1) << 2.);
Matrix one = (Mat(1, 1) << 1.);
GaussianBayesTree bayesTree_expected;
bayesTree_expected.insertRoot(
MakeClique(GaussianConditional(pair_list_of (x3, Matrix_(2,1, 2., 0.)) (x4, Matrix_(2,1, 2., 2.)), 2, (Vec(2) << 2., 2.)), list_of
(MakeClique(GaussianConditional(pair_list_of (x2, Matrix_(2,1, -2.*sqrt(2.), 0.)) (x1, Matrix_(2,1, -sqrt(2.), -sqrt(2.))) (x3, Matrix_(2,1, -sqrt(2.), sqrt(2.))), 2, (Vec(2) << -2.*sqrt(2.), 0.))))));
MakeClique(GaussianConditional(pair_list_of (x3, (Mat(2, 1) << 2., 0.)) (x4, (Mat(2, 1) << 2., 2.)), 2, (Vec(2) << 2., 2.)), list_of
(MakeClique(GaussianConditional(pair_list_of (x2, (Mat(2, 1) << -2.*sqrt(2.), 0.)) (x1, (Mat(2, 1) << -sqrt(2.), -sqrt(2.))) (x3, (Mat(2, 1) << -sqrt(2.), sqrt(2.))), 2, (Vec(2) << -2.*sqrt(2.), 0.))))));
EXPECT(assert_equal(bayesTree_expected, bt));
}
@@ -191,21 +191,21 @@ TEST(GaussianBayesTree, ComputeSteepestDescentPointBT) {
GaussianBayesTree bt;
bt.insertRoot(MakeClique(GaussianConditional(
pair_list_of
(2, Matrix_(6,2,
(2, (Mat(6, 2) <<
31.0,32.0,
0.0,34.0,
0.0,0.0,
0.0,0.0,
0.0,0.0,
0.0,0.0))
(3, Matrix_(6,2,
(3, (Mat(6, 2) <<
35.0,36.0,
37.0,38.0,
41.0,42.0,
0.0,44.0,
0.0,0.0,
0.0,0.0))
(4, Matrix_(6,2,
(4, (Mat(6, 2) <<
0.0,0.0,
0.0,0.0,
45.0,46.0,
@@ -215,27 +215,27 @@ TEST(GaussianBayesTree, ComputeSteepestDescentPointBT) {
3, (Vec(6) << 29.0,30.0,39.0,40.0,49.0,50.0)), list_of
(MakeClique(GaussianConditional(
pair_list_of
(0, Matrix_(4,2,
(0, (Mat(4, 2) <<
3.0,4.0,
0.0,6.0,
0.0,0.0,
0.0,0.0))
(1, Matrix_(4,2,
(1, (Mat(4, 2) <<
0.0,0.0,
0.0,0.0,
17.0,18.0,
0.0,20.0))
(2, Matrix_(4,2,
(2, (Mat(4, 2) <<
0.0,0.0,
0.0,0.0,
21.0,22.0,
23.0,24.0))
(3, Matrix_(4,2,
(3, (Mat(4, 2) <<
7.0,8.0,
9.0,10.0,
0.0,0.0,
0.0,0.0))
(4, Matrix_(4,2,
(4, (Mat(4, 2) <<
11.0,12.0,
13.0,14.0,
25.0,26.0,

View File

@@ -39,20 +39,20 @@ using namespace boost::assign;
static const double tol = 1e-5;
static Matrix R = Matrix_(2,2,
static Matrix R = (Mat(2, 2) <<
-12.1244, -5.1962,
0., 4.6904);
/* ************************************************************************* */
TEST(GaussianConditional, constructor)
{
Matrix S1 = Matrix_(2,2,
Matrix S1 = (Mat(2, 2) <<
-5.2786, -8.6603,
5.0254, 5.5432);
Matrix S2 = Matrix_(2,2,
Matrix S2 = (Mat(2, 2) <<
-10.5573, -5.9385,
5.5737, 3.0153);
Matrix S3 = Matrix_(2,2,
Matrix S3 = (Mat(2, 2) <<
-11.3820, -7.2581,
-3.0153, -3.5635);
@@ -133,13 +133,13 @@ TEST( GaussianConditional, solve )
expectedX(0) = 20-3-11 ; expectedX(1) = 40-7-15;
// create a conditional Gaussian node
Matrix R = Matrix_(2,2, 1., 0.,
Matrix R = (Mat(2, 2) << 1., 0.,
0., 1.);
Matrix A1 = Matrix_(2,2, 1., 2.,
Matrix A1 = (Mat(2, 2) << 1., 2.,
3., 4.);
Matrix A2 = Matrix_(2,2, 5., 6.,
Matrix A2 = (Mat(2, 2) << 5., 6.,
7., 8.);
Vector d(2); d << 20.0, 40.0;
@@ -241,8 +241,8 @@ TEST( GaussianConditional, solveTranspose ) {
* 1 1 9
* 1 5
*/
Matrix R11 = Matrix_(1, 1, 1.0), S12 = Matrix_(1, 1, 1.0);
Matrix R22 = Matrix_(1, 1, 1.0);
Matrix R11 = (Mat(1, 1) << 1.0), S12 = (Mat(1, 1) << 1.0);
Matrix R22 = (Mat(1, 1) << 1.0);
Vector d1(1), d2(1);
d1(0) = 9;
d2(0) = 5;

View File

@@ -54,7 +54,7 @@ TEST(GaussianFactorGraph, initialization) {
// Test sparse, which takes a vector and returns a matrix, used in MATLAB
// Note that this is the augmented vector and the RHS is in column 7
Matrix expectedIJS = Matrix_(3,22,
Matrix expectedIJS = (Mat(3, 22) <<
1., 2., 1., 2., 3., 4., 3., 4., 3., 4., 5., 6., 5., 6., 5., 6., 7., 8., 7., 8., 7., 8.,
1., 2., 7., 7., 1., 2., 3., 4., 7., 7., 1., 2., 5., 6., 7., 7., 3., 4., 5., 6., 7., 7.,
10., 10., -1., -1., -10., -10., 10., 10., 2., -1., -5., -5., 5., 5., 0., 1., -5., -5., 5., 5., -1., 1.5
@@ -73,7 +73,7 @@ TEST(GaussianFactorGraph, sparseJacobian) {
// 0 0 0 14 15 16
// Expected - NOTE that we transpose this!
Matrix expected = Matrix_(16,3,
Matrix expectedT = (Mat(16, 3) <<
1., 1., 2.,
1., 2., 4.,
1., 3., 6.,
@@ -89,12 +89,14 @@ TEST(GaussianFactorGraph, sparseJacobian) {
4., 4.,28.,
4., 5.,30.,
3., 6.,26.,
4., 6.,32.).transpose();
4., 6.,32.);
Matrix expected = expectedT.transpose();
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Isotropic::Sigma(2, 0.5);
gfg.add(0, Matrix_(2,3, 1., 2., 3., 5., 6., 7.), (Vec(2) << 4., 8.), model);
gfg.add(0, Matrix_(2,3, 9.,10., 0., 0., 0., 0.), 1, Matrix_(2,2, 11., 12., 14., 15.), Vector_(2, 13.,16.), model);
gfg.add(0, (Mat(2, 3) << 1., 2., 3., 5., 6., 7.), (Vec(2) << 4., 8.), model);
gfg.add(0, (Mat(2, 3) << 9.,10., 0., 0., 0., 0.), 1, (Mat(2, 2) << 11., 12., 14., 15.), (Vec(2) << 13.,16.), model);
Matrix actual = gfg.sparseJacobian_();
@@ -112,8 +114,8 @@ TEST(GaussianFactorGraph, matrices) {
GaussianFactorGraph gfg;
SharedDiagonal model = noiseModel::Unit::Create(2);
gfg.add(0, Matrix_(2,3, 1., 2., 3., 5., 6., 7.), (Vec(2) << 4., 8.), model);
gfg.add(0, Matrix_(2,3, 9.,10., 0., 0., 0., 0.), 1, Matrix_(2,2, 11., 12., 14., 15.), Vector_(2, 13.,16.), model);
gfg.add(0, (Mat(2, 3) << 1., 2., 3., 5., 6., 7.), (Vec(2) << 4., 8.), model);
gfg.add(0, (Mat(2, 3) << 9.,10., 0., 0., 0., 0.), 1, (Mat(2, 2) << 11., 12., 14., 15.), (Vec(2) << 13.,16.), model);
Matrix jacobian(4,6);
jacobian <<

View File

@@ -51,7 +51,7 @@ TEST(HessianFactor, emptyConstructor)
TEST(HessianFactor, ConversionConstructor)
{
HessianFactor expected(list_of(0)(1),
SymmetricBlockMatrix(list_of(2)(4)(1), Matrix_(7,7,
SymmetricBlockMatrix(list_of(2)(4)(1), (Mat(7,7) <<
125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000,
0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000,
-25.0000, 0.0, 25.0000, 0.0, 0.0, 0.0, -5.0000,
@@ -61,11 +61,11 @@ TEST(HessianFactor, ConversionConstructor)
25.0000, -17.5000, -5.0000, 7.5000, -20.0000, 10.0000, 8.2500)));
JacobianFactor jacobian(
0, Matrix_(4,2, -1., 0.,
0, (Mat(4,2) << -1., 0.,
+0.,-1.,
1., 0.,
+0.,1.),
1, Matrix_(4,4, 1., 0., 0.00, 0., // f4
1, (Mat(4,4) << 1., 0., 0.00, 0., // f4
0., 1., 0.00, 0., // f4
0., 0., -1., 0., // f2
0., 0., 0.00, -1.), // f2
@@ -86,7 +86,7 @@ TEST(HessianFactor, ConversionConstructor)
/* ************************************************************************* */
TEST(HessianFactor, Constructor1)
{
Matrix G = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
Matrix G = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
Vector g = (Vec(2) << -8.0, -9.0);
double f = 10.0;
HessianFactor factor(0, G, g, f);
@@ -130,9 +130,9 @@ TEST(HessianFactor, Constructor1b)
/* ************************************************************************* */
TEST(HessianFactor, Constructor2)
{
Matrix G11 = Matrix_(1,1, 1.0);
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
Matrix G11 = (Mat(1,1) << 1.0);
Matrix G12 = (Mat(1,2) << 2.0, 4.0);
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
Vector g1 = (Vec(1) << -7.0);
Vector g2 = (Vec(2) << -8.0, -9.0);
double f = 10.0;
@@ -171,14 +171,14 @@ TEST(HessianFactor, Constructor2)
/* ************************************************************************* */
TEST(HessianFactor, Constructor3)
{
Matrix G11 = Matrix_(1,1, 1.0);
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
Matrix G13 = Matrix_(1,3, 3.0, 6.0, 9.0);
Matrix G11 = (Mat(1,1) << 1.0);
Matrix G12 = (Mat(1,2) << 2.0, 4.0);
Matrix G13 = (Mat(1,3) << 3.0, 6.0, 9.0);
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
Matrix G23 = Matrix_(2,3, 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
Matrix G23 = (Mat(2,3) << 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
Matrix G33 = Matrix_(3,3, 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
Matrix G33 = (Mat(3,3) << 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
Vector g1 = (Vec(1) << -7.0);
Vector g2 = (Vec(2) << -8.0, -9.0);
@@ -218,14 +218,14 @@ TEST(HessianFactor, Constructor3)
/* ************************************************************************* */
TEST(HessianFactor, ConstructorNWay)
{
Matrix G11 = Matrix_(1,1, 1.0);
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
Matrix G13 = Matrix_(1,3, 3.0, 6.0, 9.0);
Matrix G11 = (Mat(1,1) << 1.0);
Matrix G12 = (Mat(1,2) << 2.0, 4.0);
Matrix G13 = (Mat(1,3) << 3.0, 6.0, 9.0);
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
Matrix G23 = Matrix_(2,3, 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
Matrix G23 = (Mat(2,3) << 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
Matrix G33 = Matrix_(3,3, 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
Matrix G33 = (Mat(3,3) << 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
Vector g1 = (Vec(1) << -7.0);
Vector g2 = (Vec(2) << -8.0, -9.0);
@@ -271,25 +271,25 @@ TEST(HessianFactor, ConstructorNWay)
/* ************************************************************************* */
TEST(HessianFactor, CombineAndEliminate)
{
Matrix A01 = Matrix_(3,3,
Matrix A01 = (Mat(3,3) <<
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0);
Vector b0 = (Vec(3) << 1.5, 1.5, 1.5);
Vector s0 = (Vec(3) << 1.6, 1.6, 1.6);
Matrix A10 = Matrix_(3,3,
Matrix A10 = (Mat(3,3) <<
2.0, 0.0, 0.0,
0.0, 2.0, 0.0,
0.0, 0.0, 2.0);
Matrix A11 = Matrix_(3,3,
Matrix A11 = (Mat(3,3) <<
-2.0, 0.0, 0.0,
0.0, -2.0, 0.0,
0.0, 0.0, -2.0);
Vector b1 = (Vec(3) << 2.5, 2.5, 2.5);
Vector s1 = (Vec(3) << 2.6, 2.6, 2.6);
Matrix A21 = Matrix_(3,3,
Matrix A21 = (Mat(3,3) <<
3.0, 0.0, 0.0,
0.0, 3.0, 0.0,
0.0, 0.0, 3.0);
@@ -333,7 +333,7 @@ TEST(HessianFactor, eliminate2 )
Vector sigmas = (Vec(4) << sigma1, sigma1, sigma2, sigma2);
// the combined linear factor
Matrix Ax2 = Matrix_(4,2,
Matrix Ax2 = (Mat(4,2) <<
// x2
-1., 0.,
+0.,-1.,
@@ -341,7 +341,7 @@ TEST(HessianFactor, eliminate2 )
+0.,1.
);
Matrix Al1x1 = Matrix_(4,4,
Matrix Al1x1 = (Mat(4,4) <<
// l1 x1
1., 0., 0.00, 0., // f4
0., 1., 0.00, 0., // f4
@@ -370,11 +370,11 @@ TEST(HessianFactor, eliminate2 )
// create expected Conditional Gaussian
double oldSigma = 0.0894427; // from when R was made unit
Matrix R11 = Matrix_(2,2,
Matrix R11 = (Mat(2,2) <<
1.00, 0.00,
0.00, 1.00
)/oldSigma;
Matrix S12 = Matrix_(2,4,
Matrix S12 = (Mat(2,4) <<
-0.20, 0.00,-0.80, 0.00,
+0.00,-0.20,+0.00,-0.80
)/oldSigma;
@@ -384,7 +384,7 @@ TEST(HessianFactor, eliminate2 )
// the expected linear factor
double sigma = 0.2236;
Matrix Bl1x1 = Matrix_(2,4,
Matrix Bl1x1 = (Mat(2,4) <<
// l1 x1
1.00, 0.00, -1.00, 0.00,
0.00, 1.00, +0.00, -1.00
@@ -398,13 +398,13 @@ TEST(HessianFactor, eliminate2 )
TEST(HessianFactor, combine) {
// update the information matrix with a single jacobian factor
Matrix A0 = Matrix_(2, 2,
Matrix A0 = (Mat(2, 2) <<
11.1803399, 0.0,
0.0, 11.1803399);
Matrix A1 = Matrix_(2, 2,
Matrix A1 = (Mat(2, 2) <<
-2.23606798, 0.0,
0.0, -2.23606798);
Matrix A2 = Matrix_(2, 2,
Matrix A2 = (Mat(2, 2) <<
-8.94427191, 0.0,
0.0, -8.94427191);
Vector b = (Vec(2) << 2.23606798,-1.56524758);
@@ -415,7 +415,7 @@ TEST(HessianFactor, combine) {
// Form Ab' * Ab
HessianFactor actual(factors);
Matrix expected = Matrix_(7, 7,
Matrix expected = (Mat(7, 7) <<
125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000,
0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000,
-25.0000, 0.0, 5.0000, 0.0, 20.0000, 0.0, -5.0000,
@@ -431,9 +431,9 @@ TEST(HessianFactor, combine) {
/* ************************************************************************* */
TEST(HessianFactor, gradientAtZero)
{
Matrix G11 = Matrix_(1,1, 1.0);
Matrix G12 = Matrix_(1,2, 0.0, 0.0);
Matrix G22 = Matrix_(2,2, 1.0, 0.0, 0.0, 1.0);
Matrix G11 = (Mat(1, 1) << 1.0);
Matrix G12 = (Mat(1, 2) << 0.0, 0.0);
Matrix G22 = (Mat(2, 2) << 1.0, 0.0, 0.0, 1.0);
Vector g1 = (Vec(1) << -7.0);
Vector g2 = (Vec(2) << -8.0, -9.0);
double f = 194;

View File

@@ -290,25 +290,25 @@ TEST(JacobianFactor, empty )
/* ************************************************************************* */
TEST(JacobianFactor, eliminate)
{
Matrix A01 = Matrix_(3,3,
Matrix A01 = (Mat(3, 3) <<
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0);
Vector b0 = (Vec(3) << 1.5, 1.5, 1.5);
Vector s0 = (Vec(3) << 1.6, 1.6, 1.6);
Matrix A10 = Matrix_(3,3,
Matrix A10 = (Mat(3, 3) <<
2.0, 0.0, 0.0,
0.0, 2.0, 0.0,
0.0, 0.0, 2.0);
Matrix A11 = Matrix_(3,3,
Matrix A11 = (Mat(3, 3) <<
-2.0, 0.0, 0.0,
0.0, -2.0, 0.0,
0.0, 0.0, -2.0);
Vector b1 = (Vec(3) << 2.5, 2.5, 2.5);
Vector s1 = (Vec(3) << 2.6, 2.6, 2.6);
Matrix A21 = Matrix_(3,3,
Matrix A21 = (Mat(3, 3) <<
3.0, 0.0, 0.0,
0.0, 3.0, 0.0,
0.0, 0.0, 3.0);
@@ -348,7 +348,7 @@ TEST(JacobianFactor, eliminate2 )
Vector sigmas = (Vec(4) << sigma1, sigma1, sigma2, sigma2);
// the combined linear factor
Matrix Ax2 = Matrix_(4,2,
Matrix Ax2 = (Mat(4, 2) <<
// x2
-1., 0.,
+0.,-1.,
@@ -356,7 +356,7 @@ TEST(JacobianFactor, eliminate2 )
+0.,1.
);
Matrix Al1x1 = Matrix_(4,4,
Matrix Al1x1 = (Mat(4, 4) <<
// l1 x1
1., 0., 0.00, 0., // f4
0., 1., 0.00, 0., // f4
@@ -382,11 +382,11 @@ TEST(JacobianFactor, eliminate2 )
// create expected Conditional Gaussian
double oldSigma = 0.0894427; // from when R was made unit
Matrix R11 = Matrix_(2,2,
Matrix R11 = (Mat(2, 2) <<
1.00, 0.00,
0.00, 1.00
)/oldSigma;
Matrix S12 = Matrix_(2,4,
Matrix S12 = (Mat(2, 4) <<
-0.20, 0.00,-0.80, 0.00,
+0.00,-0.20,+0.00,-0.80
)/oldSigma;
@@ -397,7 +397,7 @@ TEST(JacobianFactor, eliminate2 )
// the expected linear factor
double sigma = 0.2236;
Matrix Bl1x1 = Matrix_(2,4,
Matrix Bl1x1 = (Mat(2, 4) <<
// l1 x1
1.00, 0.00, -1.00, 0.00,
0.00, 1.00, +0.00, -1.00
@@ -411,7 +411,7 @@ TEST(JacobianFactor, eliminate2 )
TEST(JacobianFactor, EliminateQR)
{
// Augmented Ab test case for whole factor graph
Matrix Ab = Matrix_(14,11,
Matrix Ab = (Mat(14, 11) <<
4., 0., 1., 4., 1., 0., 3., 6., 8., 8., 1.,
9., 2., 0., 1., 6., 3., 9., 6., 6., 9., 4.,
5., 3., 7., 9., 5., 5., 9., 1., 3., 7., 0.,
@@ -441,7 +441,7 @@ TEST(JacobianFactor, EliminateQR)
EXPECT(assert_equal(2.0 * Ab, actualDense));
// Expected augmented matrix, both GaussianConditional (first 6 rows) and remaining factor (next 4 rows)
Matrix R = 2.0*Matrix_(11,11,
Matrix R = 2.0 * (Mat(11, 11) <<
-12.1244, -5.1962, -5.2786, -8.6603, -10.5573, -5.9385, -11.3820, -7.2581, -8.7427, -13.4440, -5.3611,
0., 4.6904, 5.0254, 5.5432, 5.5737, 3.0153, -3.0153, -3.5635, -3.9290, -2.7412, 2.1625,
0., 0., -13.8160, -8.7166, -10.2245, -8.8666, -8.7632, -5.2544, -6.9192, -10.5537, -9.3250,
@@ -523,10 +523,10 @@ TEST ( JacobianFactor, constraint_eliminate2 )
EXPECT(assert_equal(expectedLF, *actual.second));
// verify CG
Matrix R = Matrix_(2, 2,
Matrix R = (Mat(2, 2) <<
1.0, 2.0,
0.0, 1.0);
Matrix S = Matrix_(2,2,
Matrix S = (Mat(2, 2) <<
1.0, 2.0,
0.0, 0.0);
Vector d = (Vec(2) << 3.0, 0.6666);

View File

@@ -49,7 +49,7 @@ TEST( KalmanFilter, constructor ) {
// Assert it has the correct mean, covariance and information
EXPECT(assert_equal(x_initial, p1->mean()));
Matrix Sigma = Matrix_(2, 2, 0.01, 0.0, 0.0, 0.01);
Matrix Sigma = (Mat(2, 2) << 0.01, 0.0, 0.0, 0.01);
EXPECT(assert_equal(Sigma, p1->covariance()));
EXPECT(assert_equal(inverse(Sigma), p1->information()));
@@ -135,10 +135,10 @@ TEST( KalmanFilter, linear1 ) {
TEST( KalmanFilter, predict ) {
// Create dynamics model
Matrix F = Matrix_(2, 2, 1.0, 0.1, 0.2, 1.1);
Matrix B = Matrix_(2, 3, 1.0, 0.1, 0.2, 1.1, 1.2, 0.8);
Matrix F = (Mat(2, 2) << 1.0, 0.1, 0.2, 1.1);
Matrix B = (Mat(2, 3) << 1.0, 0.1, 0.2, 1.1, 1.2, 0.8);
Vector u = (Vec(3) << 1.0, 0.0, 2.0);
Matrix R = Matrix_(2, 2, 1.0, 0.5, 0.0, 3.0);
Matrix R = (Mat(2, 2) << 1.0, 0.5, 0.0, 3.0);
Matrix M = trans(R)*R;
Matrix Q = inverse(M);
@@ -168,7 +168,7 @@ TEST( KalmanFilter, predict ) {
TEST( KalmanFilter, QRvsCholesky ) {
Vector mean = ones(9);
Matrix covariance = 1e-6*Matrix_(9, 9,
Matrix covariance = 1e-6 * (Mat(9, 9) <<
15.0, -6.2, 0.0, 0.0, 0.0, 0.0, 0.0, 63.8, -0.6,
-6.2, 21.9, -0.0, 0.0, 0.0, 0.0, -63.8, -0.0, -0.1,
0.0, -0.0, 100.0, 0.0, 0.0, 0.0, 0.0, 0.1, -0.0,
@@ -187,7 +187,7 @@ TEST( KalmanFilter, QRvsCholesky ) {
KalmanFilter::State p0b = kfb.init(mean, covariance);
// Set up dynamics update
Matrix Psi_k = 1e-6*Matrix_(9, 9,
Matrix Psi_k = 1e-6 * (Mat(9, 9) <<
1000000.0, 0.0, 0.0, -19200.0, 600.0, -0.0, 0.0, 0.0, 0.0,
0.0, 1000000.0, 0.0, 600.0, 19200.0, 200.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1000000.0, -0.0, -200.0, 19200.0, 0.0, 0.0, 0.0,
@@ -199,7 +199,7 @@ TEST( KalmanFilter, QRvsCholesky ) {
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000000.0);
Matrix B = zeros(9, 1);
Vector u = zero(1);
Matrix dt_Q_k = 1e-6*Matrix_(9, 9,
Matrix dt_Q_k = 1e-6 * (Mat(9, 9) <<
33.7, 3.1, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
3.1, 126.4, -0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-0.0, -0.3, 88.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
@@ -222,7 +222,7 @@ TEST( KalmanFilter, QRvsCholesky ) {
Vector expectedMean = (Vec(9) << 0.9814, 1.0200, 1.0190, 1., 1., 1., 1., 1., 1.);
EXPECT(assert_equal(expectedMean, pa->mean(), 1e-7));
EXPECT(assert_equal(expectedMean, pb->mean(), 1e-7));
Matrix expected = 1e-6*Matrix_(9, 9,
Matrix expected = 1e-6 * (Mat(9, 9) <<
48.8, -3.1, -0.0, -0.4, -0.4, 0.0, 0.0, 63.8, -0.6,
-3.1, 148.4, -0.3, 0.5, 1.7, 0.2, -63.8, 0.0, -0.1,
-0.0, -0.3, 188.0, -0.0, 0.2, 1.2, 0.0, 0.1, 0.0,
@@ -236,7 +236,7 @@ TEST( KalmanFilter, QRvsCholesky ) {
EXPECT(assert_equal(expected, pb->covariance(), 1e-7));
// prepare update
Matrix H = 1e-3*Matrix_(3, 9,
Matrix H = 1e-3 * (Mat(3, 9) <<
0.0, 9795.9, 83.6, 0.0, 0.0, 0.0, 1000.0, 0.0, 0.0,
-9795.9, 0.0, -5.2, 0.0, 0.0, 0.0, 0.0, 1000.0, 0.0,
-83.6, 5.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1000.);
@@ -256,7 +256,7 @@ TEST( KalmanFilter, QRvsCholesky ) {
Vector expectedMean2 = (Vec(9) << 0.9207, 0.9030, 1.0178, 1.0002, 0.9992, 0.9998, 0.9981, 1.0035, 0.9882);
EXPECT(assert_equal(expectedMean2, pa2->mean(), 1e-4));// not happy with tolerance here !
EXPECT(assert_equal(expectedMean2, pb2->mean(), 1e-4));// is something still amiss?
Matrix expected2 = 1e-6*Matrix_(9, 9,
Matrix expected2 = 1e-6 * (Mat(9, 9) <<
46.1, -2.6, -0.0, -0.4, -0.4, 0.0, 0.0, 63.9, -0.5,
-2.6, 132.8, -0.5, 0.4, 1.5, 0.2, -64.0, -0.0, -0.1,
-0.0, -0.5, 188.0, -0.0, 0.2, 1.2, -0.0, 0.1, 0.0,

View File

@@ -31,11 +31,11 @@ using namespace gtsam;
using namespace noiseModel;
static double sigma = 2, s_1=1.0/sigma, var = sigma*sigma, prc = 1.0/var;
static Matrix R = Matrix_(3, 3,
static Matrix R = (Mat(3, 3) <<
s_1, 0.0, 0.0,
0.0, s_1, 0.0,
0.0, 0.0, s_1);
static Matrix Sigma = Matrix_(3, 3,
static Matrix Sigma = (Mat(3, 3) <<
var, 0.0, 0.0,
0.0, var, 0.0,
0.0, 0.0, var);
@@ -74,7 +74,7 @@ TEST(NoiseModel, constructors)
DOUBLES_EQUAL(distance,mi->Mahalanobis(unwhitened),1e-9);
// test R matrix
Matrix expectedR(Matrix_(3, 3,
Matrix expectedR((Mat(3, 3) <<
s_1, 0.0, 0.0,
0.0, s_1, 0.0,
0.0, 0.0, s_1));
@@ -83,12 +83,12 @@ TEST(NoiseModel, constructors)
EXPECT(assert_equal(expectedR,mi->R()));
// test Whiten operator
Matrix H(Matrix_(3, 4,
Matrix H((Mat(3, 4) <<
0.0, 0.0, 1.0, 1.0,
0.0, 1.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0));
Matrix expected(Matrix_(3, 4,
Matrix expected((Mat(3, 4) <<
0.0, 0.0, s_1, s_1,
0.0, s_1, 0.0, s_1,
s_1, 0.0, 0.0, s_1));
@@ -202,7 +202,7 @@ TEST(NoiseModel, ConstrainedAll )
/* ************************************************************************* */
namespace exampleQR {
// create a matrix to eliminate
Matrix Ab = Matrix_(4, 6+1,
Matrix Ab = (Mat(4, 7) <<
-1., 0., 1., 0., 0., 0., -0.2,
0., -1., 0., 1., 0., 0., 0.3,
1., 0., 0., 0., -1., 0., 0.2,
@@ -210,7 +210,7 @@ namespace exampleQR {
Vector sigmas = (Vec(4) << 0.2, 0.2, 0.1, 0.1);
// the matrix AB yields the following factorized version:
Matrix Rd = Matrix_(4, 6+1,
Matrix Rd = (Mat(4, 7) <<
11.1803, 0.0, -2.23607, 0.0, -8.94427, 0.0, 2.23607,
0.0, 11.1803, 0.0, -2.23607, 0.0, -8.94427,-1.56525,
0.0, 0.0, 4.47214, 0.0, -4.47214, 0.0, 0.0,
@@ -238,7 +238,7 @@ TEST( NoiseModel, QR )
SharedDiagonal actual2 = constrained->QR(Ab2);
SharedDiagonal expectedModel2 = noiseModel::Diagonal::Sigmas(expectedSigmas);
EXPECT(assert_equal(*expectedModel2,*actual2,1e-6));
Matrix expectedRd2 = Matrix_(4, 6+1,
Matrix expectedRd2 = (Mat(4, 7) <<
1., 0., -0.2, 0., -0.8, 0., 0.2,
0., 1., 0.,-0.2, 0., -0.8,-0.14,
0., 0., 1., 0., -1., 0., 0.0,
@@ -250,10 +250,10 @@ TEST( NoiseModel, QR )
TEST(NoiseModel, QRNan )
{
SharedDiagonal constrained = noiseModel::Constrained::All(2);
Matrix Ab = Matrix_(2, 5, 1., 2., 1., 2., 3., 2., 1., 2., 4., 4.);
Matrix Ab = (Mat(2, 5) << 1., 2., 1., 2., 3., 2., 1., 2., 4., 4.);
SharedDiagonal expected = noiseModel::Constrained::All(2);
Matrix expectedAb = Matrix_(2, 5, 1., 2., 1., 2., 3., 0., 1., 0., 0., 2.0/3);
Matrix expectedAb = (Mat(2, 5) << 1., 2., 1., 2., 3., 0., 1., 0., 0., 2.0/3);
SharedDiagonal actual = constrained->QR(Ab);
EXPECT(assert_equal(*expected,*actual));
@@ -304,7 +304,7 @@ TEST(NoiseModel, robustFunction)
TEST(NoiseModel, robustNoise)
{
const double k = 10.0, error1 = 1.0, error2 = 100.0;
Matrix A = Matrix_(2, 2, 1.0, 10.0, 100.0, 1000.0);
Matrix A = (Mat(2, 2) << 1.0, 10.0, 100.0, 1000.0);
Vector b = (Vec(2) << error1, error2);
const Robust::shared_ptr robust = Robust::Create(
mEstimator::Huber::Create(k, mEstimator::Huber::Scalar),

View File

@@ -159,9 +159,9 @@ TEST (Serialization, linear_factors) {
/* ************************************************************************* */
TEST (Serialization, gaussian_conditional) {
Matrix A1 = Matrix_(2,2, 1., 2., 3., 4.);
Matrix A2 = Matrix_(2,2, 6., 0.2, 8., 0.4);
Matrix R = Matrix_(2,2, 0.1, 0.3, 0.0, 0.34);
Matrix A1 = (Mat(2, 2) << 1., 2., 3., 4.);
Matrix A2 = (Mat(2, 2) << 6., 0.2, 8., 0.4);
Matrix R = (Mat(2, 2) << 0.1, 0.3, 0.0, 0.34);
Vector d(2); d << 0.2, 0.5;
GaussianConditional cg(0, d, R, 1, A1, 2, A2);
@@ -174,9 +174,9 @@ TEST (Serialization, gaussian_conditional) {
TEST (Serialization, gaussian_factor_graph) {
GaussianFactorGraph graph;
{
Matrix A1 = Matrix_(2,2, 1., 2., 3., 4.);
Matrix A2 = Matrix_(2,2, 6., 0.2, 8., 0.4);
Matrix R = Matrix_(2,2, 0.1, 0.3, 0.0, 0.34);
Matrix A1 = (Mat(2, 2) << 1., 2., 3., 4.);
Matrix A2 = (Mat(2, 2) << 6., 0.2, 8., 0.4);
Matrix R = (Mat(2, 2) << 0.1, 0.3, 0.0, 0.34);
Vector d(2); d << 0.2, 0.5;
GaussianConditional cg(0, d, R, 1, A1, 2, A2);
graph.push_back(cg);
@@ -203,10 +203,10 @@ TEST (Serialization, gaussian_bayes_tree) {
const Ordering chainOrdering = Ordering(list_of(x2)(x1)(x3)(x4));
const SharedDiagonal chainNoise = noiseModel::Isotropic::Sigma(1, 0.5);
const GaussianFactorGraph chain = list_of
(JacobianFactor(x2, Matrix_(1,1,1.), x1, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x2, Matrix_(1,1,1.), x3, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x3, Matrix_(1,1,1.), x4, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x4, Matrix_(1,1,1.), (Vec(1) << 1.), chainNoise));
(JacobianFactor(x2, (Mat(1, 1) << 1.), x1, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x2, (Mat(1, 1) << 1.), x3, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x3, (Mat(1, 1) << 1.), x4, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise))
(JacobianFactor(x4, (Mat(1, 1) << 1.), (Vec(1) << 1.), chainNoise));
GaussianBayesTree init = *chain.eliminateMultifrontal(chainOrdering);
GaussianBayesTree expected = *chain.eliminateMultifrontal(chainOrdering);

View File

@@ -53,7 +53,7 @@ static const Index _x1_=1, _x2_=2, _l1_=3;
int main()
{
// create a linear factor
Matrix Ax2 = Matrix_(8,2,
Matrix Ax2 = (Mat(8, 2) <<
// x2
-5., 0.,
+0.,-5.,
@@ -65,7 +65,7 @@ int main()
+0.,10.
);
Matrix Al1 = Matrix_(8,10,
Matrix Al1 = (Mat(8, 10) <<
// l1
5., 0.,1.,2.,3.,4.,5.,6.,7.,8.,
0., 5.,1.,2.,3.,4.,5.,6.,7.,8.,
@@ -77,7 +77,7 @@ int main()
0., 0.,1.,2.,3.,4.,5.,6.,7.,8.
);
Matrix Ax1 = Matrix_(8,2,
Matrix Ax1 = (Mat(8, 2) <<
// x1
0.00, 0.,1.,2.,3.,4.,5.,6.,7.,8.,
0.00, 0.,1.,2.,3.,4.,5.,6.,7.,8.,