Fix Vector_() to Vec() in tests

release/4.3a0
Jing Dong 2013-10-22 03:56:51 +00:00
parent b8ce816de2
commit 181881a8d7
14 changed files with 119 additions and 119 deletions
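The change swaps GTSAM's old varargs-style Vector_(n, ...) helper for Eigen's comma initializer. A minimal sketch of the two idioms, assuming Vec is a test-local alias for gtsam::Vector (an Eigen::VectorXd): with plain Eigen the comma expression must be closed with .finished(), whereas the (Vec(n) << ...) form used in the diff relies on the surrounding call accepting the expression.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Old idiom removed by this commit (GTSAM helper, size followed by values):
  //   Vector z = Vector_(2, 1.0, 0.0);

  // New idiom: build the same 2-vector with Eigen's comma initializer.
  // .finished() converts the comma-initializer proxy back into a vector.
  Eigen::VectorXd z = (Eigen::VectorXd(2) << 1.0, 0.0).finished();

  std::cout << z.transpose() << std::endl;  // prints: 1 0
  return 0;
}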

View File

@@ -223,9 +223,9 @@ Values createValues() {
 VectorValues createVectorValues() {
   using namespace impl;
   VectorValues c = boost::assign::pair_list_of
-    (_l1_, Vector_(2, 0.0, -1.0))
-    (_x1_, Vector_(2, 0.0, 0.0))
-    (_x2_, Vector_(2, 1.5, 0.0));
+    (_l1_, (Vec(2) << 0.0, -1.0))
+    (_x1_, (Vec(2) << 0.0, 0.0))
+    (_x2_, (Vec(2) << 1.5, 0.0));
   return c;
 }
@@ -250,9 +250,9 @@ VectorValues createCorrectDelta() {
   using symbol_shorthand::X;
   using symbol_shorthand::L;
   VectorValues c;
-  c.insert(L(1), Vector_(2, -0.1, 0.1));
-  c.insert(X(1), Vector_(2, -0.1, -0.1));
-  c.insert(X(2), Vector_(2, 0.1, -0.2));
+  c.insert(L(1), (Vec(2) << -0.1, 0.1));
+  c.insert(X(1), (Vec(2) << -0.1, -0.1));
+  c.insert(X(2), (Vec(2) << 0.1, -0.2));
   return c;
 }
@@ -278,13 +278,13 @@ GaussianFactorGraph createGaussianFactorGraph() {
   fg += JacobianFactor(X(1), 10*eye(2), -1.0*ones(2));
   // odometry between x1 and x2: x2-x1=[0.2;-0.1]
-  fg += JacobianFactor(X(1), -10*eye(2), X(2), 10*eye(2), Vector_(2, 2.0, -1.0));
+  fg += JacobianFactor(X(1), -10*eye(2), X(2), 10*eye(2), (Vec(2) << 2.0, -1.0));
   // measurement between x1 and l1: l1-x1=[0.0;0.2]
-  fg += JacobianFactor(X(1), -5*eye(2), L(1), 5*eye(2), Vector_(2, 0.0, 1.0));
+  fg += JacobianFactor(X(1), -5*eye(2), L(1), 5*eye(2), (Vec(2) << 0.0, 1.0));
   // measurement between x2 and l1: l1-x2=[-0.2;0.3]
-  fg += JacobianFactor(X(2), -5*eye(2), L(1), 5*eye(2), Vector_(2, -1.0, 1.5));
+  fg += JacobianFactor(X(2), -5*eye(2), L(1), 5*eye(2), (Vec(2) << -1.0, 1.5));
   return fg;
 }
@@ -350,7 +350,7 @@ boost::shared_ptr<const NonlinearFactorGraph> sharedReallyNonlinearFactorGraph()
   using symbol_shorthand::X;
   using symbol_shorthand::L;
   boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
-  Vector z = Vector_(2, 1.0, 0.0);
+  Vector z = (Vec(2) << 1.0, 0.0);
   double sigma = 0.1;
   boost::shared_ptr<smallOptimize::UnaryFactor> factor(
       new smallOptimize::UnaryFactor(z, noiseModel::Isotropic::Sigma(2,sigma), X(1)));
@@ -422,7 +422,7 @@ GaussianFactorGraph createSimpleConstraintGraph() {
   // |0 1||x_2| | 0 -1||y_2| |0|
   Matrix Ax1 = eye(2);
   Matrix Ay1 = eye(2) * -1;
-  Vector b2 = Vector_(2, 0.0, 0.0);
+  Vector b2 = (Vec(2) << 0.0, 0.0);
   JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
       constraintModel));
@@ -440,7 +440,7 @@ VectorValues createSimpleConstraintValues() {
   using symbol_shorthand::X;
   using symbol_shorthand::L;
   VectorValues config;
-  Vector v = Vector_(2, 1.0, -1.0);
+  Vector v = (Vec(2) << 1.0, -1.0);
   config.insert(_x_, v);
   config.insert(_y_, v);
   return config;
@@ -468,7 +468,7 @@ GaussianFactorGraph createSingleConstraintGraph() {
   Ax1(1, 0) = 2.0;
   Ax1(1, 1) = 1.0;
   Matrix Ay1 = eye(2) * 10;
-  Vector b2 = Vector_(2, 1.0, 2.0);
+  Vector b2 = (Vec(2) << 1.0, 2.0);
   JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
       constraintModel));
@@ -484,8 +484,8 @@ GaussianFactorGraph createSingleConstraintGraph() {
 VectorValues createSingleConstraintValues() {
   using namespace impl;
   VectorValues config = boost::assign::pair_list_of
-    (_x_, Vector_(2, 1.0, -1.0))
-    (_y_, Vector_(2, 0.2, 0.1));
+    (_x_, (Vec(2) << 1.0, -1.0))
+    (_y_, (Vec(2) << 0.2, 0.1));
   return config;
 }
@@ -494,7 +494,7 @@ GaussianFactorGraph createMultiConstraintGraph() {
   using namespace impl;
   // unary factor 1
   Matrix A = eye(2);
-  Vector b = Vector_(2, -2.0, 2.0);
+  Vector b = (Vec(2) << -2.0, 2.0);
   JacobianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, sigma0_1));
   // constraint 1
@@ -548,9 +548,9 @@ GaussianFactorGraph createMultiConstraintGraph() {
 VectorValues createMultiConstraintValues() {
   using namespace impl;
   VectorValues config = boost::assign::pair_list_of
-    (_x_, Vector_(2, -2.0, 2.0))
-    (_y_, Vector_(2, -0.1, 0.4))
-    (_z_, Vector_(2, -4.0, 5.0));
+    (_x_, (Vec(2) << -2.0, 2.0))
+    (_y_, (Vec(2) << -0.1, 0.4))
+    (_z_, (Vec(2) << -4.0, 5.0));
   return config;
 }

View File

@@ -48,21 +48,21 @@ TEST(DoglegOptimizer, ComputeBlend) {
   // Create an arbitrary Bayes Net
   GaussianBayesNet gbn;
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      0, Vector_(2, 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
+      0, (Vec(2) << 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
       3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
       4, Matrix_(2,2, 11.0,12.0,13.0,14.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      1, Vector_(2, 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
+      1, (Vec(2) << 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
       2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
       4, Matrix_(2,2, 25.0,26.0,27.0,28.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      2, Vector_(2, 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
+      2, (Vec(2) << 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
       3, Matrix_(2,2, 35.0,36.0,37.0,38.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      3, Vector_(2, 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
+      3, (Vec(2) << 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
       4, Matrix_(2,2, 45.0,46.0,47.0,48.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0)));
+      4, (Vec(2) << 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0)));
   // Compute steepest descent point
   VectorValues xu = gbn.optimizeGradientSearch();
@@ -84,21 +84,21 @@ TEST(DoglegOptimizer, ComputeDoglegPoint) {
   // Create an arbitrary Bayes Net
   GaussianBayesNet gbn;
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      0, Vector_(2, 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
+      0, (Vec(2) << 1.0,2.0), Matrix_(2,2, 3.0,4.0,0.0,6.0),
       3, Matrix_(2,2, 7.0,8.0,9.0,10.0),
       4, Matrix_(2,2, 11.0,12.0,13.0,14.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      1, Vector_(2, 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
+      1, (Vec(2) << 15.0,16.0), Matrix_(2,2, 17.0,18.0,0.0,20.0),
       2, Matrix_(2,2, 21.0,22.0,23.0,24.0),
       4, Matrix_(2,2, 25.0,26.0,27.0,28.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      2, Vector_(2, 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
+      2, (Vec(2) << 29.0,30.0), Matrix_(2,2, 31.0,32.0,0.0,34.0),
       3, Matrix_(2,2, 35.0,36.0,37.0,38.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      3, Vector_(2, 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
+      3, (Vec(2) << 39.0,40.0), Matrix_(2,2, 41.0,42.0,0.0,44.0),
       4, Matrix_(2,2, 45.0,46.0,47.0,48.0)));
   gbn += GaussianConditional::shared_ptr(new GaussianConditional(
-      4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0)));
+      4, (Vec(2) << 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0)));
   // Compute dogleg point for different deltas

View File

@@ -37,20 +37,20 @@ TEST( ExtendedKalmanFilter, linear ) {
   // Create the Kalman Filter initialization point
   Point2 x_initial(0.0, 0.0);
-  SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.1));
+  SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
   // Create an ExtendedKalmanFilter object
   ExtendedKalmanFilter<Point2> ekf(x0, x_initial, P_initial);
   // Create the controls and measurement properties for our example
   double dt = 1.0;
-  Vector u = Vector_(2, 1.0, 0.0);
+  Vector u = (Vec(2) << 1.0, 0.0);
   Point2 difference(u*dt);
-  SharedDiagonal Q = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.1), true);
+  SharedDiagonal Q = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1), true);
   Point2 z1(1.0, 0.0);
   Point2 z2(2.0, 0.0);
   Point2 z3(3.0, 0.0);
-  SharedDiagonal R = noiseModel::Diagonal::Sigmas(Vector_(2, 0.25, 0.25), true);
+  SharedDiagonal R = noiseModel::Diagonal::Sigmas((Vec(2) << 0.25, 0.25), true);
   // Create the set of expected output TestValues
   Point2 expected1(1.0, 0.0);
@@ -107,7 +107,7 @@ public:
   NonlinearMotionModel(){}
   NonlinearMotionModel(const Symbol& TestKey1, const Symbol& TestKey2) :
-    Base(noiseModel::Diagonal::Sigmas(Vector_(2, 1.0, 1.0)), TestKey1, TestKey2), Q_(2,2) {
+    Base(noiseModel::Diagonal::Sigmas((Vec(2) << 1.0, 1.0)), TestKey1, TestKey2), Q_(2,2) {
     // Initialize motion model parameters:
     // w is vector of motion noise sigmas. The final covariance is calculated as G*w*w'*G'
@@ -405,7 +405,7 @@ TEST( ExtendedKalmanFilter, nonlinear ) {
   // Create the Kalman Filter initialization point
   Point2 x_initial(0.90, 1.10);
-  SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.1));
+  SharedDiagonal P_initial = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.1));
   // Create an ExtendedKalmanFilter object
   ExtendedKalmanFilter<Point2> ekf(X(0), x_initial, P_initial);
@@ -418,7 +418,7 @@ TEST( ExtendedKalmanFilter, nonlinear ) {
     x_predict = ekf.predict(motionFactor);
     // Create a measurement factor
-    NonlinearMeasurementModel measurementFactor(X(i+1), Vector_(1, z[i]));
+    NonlinearMeasurementModel measurementFactor(X(i+1), (Vec(1) << z[i]));
     x_update = ekf.update(measurementFactor);
     EXPECT(assert_equal(expected_predict[i],x_predict, 1e-6));

View File

@@ -296,13 +296,13 @@ TEST(GaussianBayesTree, shortcut_overlapping_separator)
   // f(6,7)
   GaussianFactorGraph fg;
   noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1);
-  fg.add(1, Matrix_(1,1, 1.0), 3, Matrix_(1,1, 2.0), 5, Matrix_(1,1, 3.0), Vector_(1, 4.0), model);
-  fg.add(1, Matrix_(1,1, 5.0), Vector_(1, 6.0), model);
-  fg.add(2, Matrix_(1,1, 7.0), 4, Matrix_(1,1, 8.0), 5, Matrix_(1,1, 9.0), Vector_(1, 10.0), model);
-  fg.add(2, Matrix_(1,1, 11.0), Vector_(1, 12.0), model);
-  fg.add(5, Matrix_(1,1, 13.0), 6, Matrix_(1,1, 14.0), Vector_(1, 15.0), model);
-  fg.add(6, Matrix_(1,1, 17.0), 7, Matrix_(1,1, 18.0), Vector_(1, 19.0), model);
-  fg.add(7, Matrix_(1,1, 20.0), Vector_(1, 21.0), model);
+  fg.add(1, Matrix_(1,1, 1.0), 3, Matrix_(1,1, 2.0), 5, Matrix_(1,1, 3.0), (Vec(1) << 4.0), model);
+  fg.add(1, Matrix_(1,1, 5.0), (Vec(1) << 6.0), model);
+  fg.add(2, Matrix_(1,1, 7.0), 4, Matrix_(1,1, 8.0), 5, Matrix_(1,1, 9.0), (Vec(1) << 10.0), model);
+  fg.add(2, Matrix_(1,1, 11.0), (Vec(1) << 12.0), model);
+  fg.add(5, Matrix_(1,1, 13.0), 6, Matrix_(1,1, 14.0), (Vec(1) << 15.0), model);
+  fg.add(6, Matrix_(1,1, 17.0), 7, Matrix_(1,1, 18.0), (Vec(1) << 19.0), model);
+  fg.add(7, Matrix_(1,1, 20.0), (Vec(1) << 21.0), model);
   // Eliminate into BayesTree
   // c(6,7)

View File

@@ -79,7 +79,7 @@ TEST( GaussianFactorGraph, eliminateOne_x1 )
   // create expected Conditional Gaussian
   Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
-  Vector d = Vector_(2, -0.133333, -0.0222222);
+  Vector d = (Vec(2) << -0.133333, -0.0222222);
   GaussianConditional expected(X(1),15*d,R11,L(1),S12,X(2),S13);
   EXPECT(assert_equal(expected,*conditional,tol));
@@ -97,7 +97,7 @@ TEST( GaussianFactorGraph, eliminateOne_x2 )
   // create expected Conditional Gaussian
   double sig = 0.0894427;
   Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
-  Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
+  Vector d = (Vec(2) << 0.2, -0.14)/sig, sigma = ones(2);
   GaussianConditional expected(ordering[X(2)],d,R11,ordering[L(1)],S12,ordering[X(1)],S13,sigma);
   EXPECT(assert_equal(expected,*actual,tol));
@@ -113,7 +113,7 @@ TEST( GaussianFactorGraph, eliminateOne_l1 )
   // create expected Conditional Gaussian
   double sig = sqrt(2.0)/10.;
   Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
-  Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
+  Vector d = (Vec(2) << -0.1, 0.25)/sig, sigma = ones(2);
   GaussianConditional expected(ordering[L(1)],d,R11,ordering[X(1)],S12,ordering[X(2)],S13,sigma);
   EXPECT(assert_equal(expected,*actual,tol));
@@ -130,7 +130,7 @@ TEST( GaussianFactorGraph, eliminateOne_x1_fast )
   // create expected Conditional Gaussian
   Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
-  Vector d = Vector_(2, -0.133333, -0.0222222), sigma = ones(2);
+  Vector d = (Vec(2) << -0.133333, -0.0222222), sigma = ones(2);
   GaussianConditional expected(ordering[X(1)],15*d,R11,ordering[L(1)],S12,ordering[X(2)],S13,sigma);
   // Create expected remaining new factor
@@ -144,7 +144,7 @@ TEST( GaussianFactorGraph, eliminateOne_x1_fast )
       0., -2.357022603955159,
       7.071067811865475, 0.,
       0., 7.071067811865475),
-      Vector_(4, -0.707106781186547, 0.942809041582063, 0.707106781186547, -1.414213562373094), noiseModel::Unit::Create(4));
+      (Vec(4) << -0.707106781186547, 0.942809041582063, 0.707106781186547, -1.414213562373094), noiseModel::Unit::Create(4));
   EXPECT(assert_equal(expected,*conditional,tol));
   EXPECT(assert_equal((const GaussianFactor&)expectedFactor,*remaining.back(),tol));
@@ -160,7 +160,7 @@ TEST( GaussianFactorGraph, eliminateOne_x2_fast )
   // create expected Conditional Gaussian
   double sig = 0.0894427;
   Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
-  Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
+  Vector d = (Vec(2) << 0.2, -0.14)/sig, sigma = ones(2);
   GaussianConditional expected(ordering[X(2)],d,R11,ordering[X(1)],S13,ordering[L(1)],S12,sigma);
   EXPECT(assert_equal(expected,*actual,tol));
@@ -176,7 +176,7 @@ TEST( GaussianFactorGraph, eliminateOne_l1_fast )
   // create expected Conditional Gaussian
   double sig = sqrt(2.0)/10.;
   Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
-  Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
+  Vector d = (Vec(2) << -0.1, 0.25)/sig, sigma = ones(2);
   GaussianConditional expected(ordering[L(1)],d,R11,ordering[X(1)],S12,ordering[X(2)],S13,sigma);
   EXPECT(assert_equal(expected,*actual,tol));
@@ -191,15 +191,15 @@ TEST( GaussianFactorGraph, eliminateAll )
   Ordering ordering;
   ordering += X(2),L(1),X(1);
-  Vector d1 = Vector_(2, -0.1,-0.1);
+  Vector d1 = (Vec(2) << -0.1,-0.1);
   GaussianBayesNet expected = simpleGaussian(ordering[X(1)],d1,0.1);
   double sig1 = 0.149071;
-  Vector d2 = Vector_(2, 0.0, 0.2)/sig1, sigma2 = ones(2);
+  Vector d2 = (Vec(2) << 0.0, 0.2)/sig1, sigma2 = ones(2);
   push_front(expected,ordering[L(1)],d2, I/sig1,ordering[X(1)], (-1)*I/sig1,sigma2);
   double sig2 = 0.0894427;
-  Vector d3 = Vector_(2, 0.2, -0.14)/sig2, sigma3 = ones(2);
+  Vector d3 = (Vec(2) << 0.2, -0.14)/sig2, sigma3 = ones(2);
   push_front(expected,ordering[X(2)],d3, I/sig2,ordering[L(1)], (-0.2)*I/sig2, ordering[X(1)], (-0.8)*I/sig2, sigma3);
   // Check one ordering
@@ -374,10 +374,10 @@ TEST( GaussianFactorGraph, multiplication )
   VectorValues x = createCorrectDelta(ord);
   Errors actual = A * x;
   Errors expected;
-  expected += Vector_(2,-1.0,-1.0);
-  expected += Vector_(2, 2.0,-1.0);
-  expected += Vector_(2, 0.0, 1.0);
-  expected += Vector_(2,-1.0, 1.5);
+  expected += (Vec(2) << -1.0,-1.0);
+  expected += (Vec(2) << 2.0,-1.0);
+  expected += (Vec(2) << 0.0, 1.0);
+  expected += (Vec(2) << -1.0, 1.5);
   EXPECT(assert_equal(expected,actual));
 }
@@ -390,7 +390,7 @@ TEST( GaussianFactorGraph, elimination )
   // Create Gaussian Factor Graph
   GaussianFactorGraph fg;
   Matrix Ap = eye(1), An = eye(1) * -1;
-  Vector b = Vector_(1, 0.0);
+  Vector b = (Vec(1) << 0.0);
   SharedDiagonal sigma = noiseModel::Isotropic::Sigma(1,2.0);
   fg += ord[X(1)], An, ord[X(2)], Ap, b, sigma;
   fg += ord[X(1)], Ap, b, sigma;
@@ -544,7 +544,7 @@ TEST( GaussianFactorGraph, conditional_sigma_failure) {
   gtsam::Key xC1 = 0, l32 = 1, l41 = 2;
   // noisemodels at nonlinear level
-  gtsam::SharedNoiseModel priorModel = noiseModel::Diagonal::Sigmas(Vector_(6, 0.05, 0.05, 3.0, 0.2, 0.2, 0.2));
+  gtsam::SharedNoiseModel priorModel = noiseModel::Diagonal::Sigmas((Vec(6) << 0.05, 0.05, 3.0, 0.2, 0.2, 0.2));
   gtsam::SharedNoiseModel measModel = noiseModel::Unit::Create(2);
   gtsam::SharedNoiseModel elevationModel = noiseModel::Isotropic::Sigma(1, 3.0);

View File

@@ -42,8 +42,8 @@ const double tol = 1e-4;
 //  SETDEBUG("ISAM2 recalculate", true);
 // Set up parameters
-SharedDiagonal odoNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.1, 0.1, M_PI/100.0));
-SharedDiagonal brNoise = noiseModel::Diagonal::Sigmas(Vector_(2, M_PI/100.0, 0.1));
+SharedDiagonal odoNoise = noiseModel::Diagonal::Sigmas((Vec(3) << 0.1, 0.1, M_PI/100.0));
+SharedDiagonal brNoise = noiseModel::Diagonal::Sigmas((Vec(2) << M_PI/100.0, 0.1));
 ISAM2 createSlamlikeISAM2(
     boost::optional<Values&> init_values = boost::none,
@@ -171,10 +171,10 @@ done:
 //  // Create values where indices 1 and 3 are above the threshold of 0.1
 //  VectorValues values;
 //  values.reserve(4, 10);
-//  values.push_back_preallocated(Vector_(2, 0.09, 0.09));
-//  values.push_back_preallocated(Vector_(3, 0.11, 0.11, 0.09));
-//  values.push_back_preallocated(Vector_(3, 0.09, 0.09, 0.09));
-//  values.push_back_preallocated(Vector_(2, 0.11, 0.11));
+//  values.push_back_preallocated((Vec(2) << 0.09, 0.09));
+//  values.push_back_preallocated((Vec(3) << 0.11, 0.11, 0.09));
+//  values.push_back_preallocated((Vec(3) << 0.09, 0.09, 0.09));
+//  values.push_back_preallocated((Vec(2) << 0.11, 0.11));
 //
 //  // Create a permutation
 //  Permutation permutation(4);

View File

@@ -105,7 +105,7 @@ TEST( GaussianJunctionTreeB, optimizeMultiFrontal )
   // verify
   VectorValues expected(vector<size_t>(7,2)); // expected solution
-  Vector v = Vector_(2, 0., 0.);
+  Vector v = (Vec(2) << 0., 0.);
   for (int i=1; i<=7; i++)
     expected[ordering[X(i)]] = v;
   EXPECT(assert_equal(expected,actual));
@@ -134,8 +134,8 @@ TEST(GaussianJunctionTreeB, slamlike) {
   Values init;
   NonlinearFactorGraph newfactors;
   NonlinearFactorGraph fullgraph;
-  SharedDiagonal odoNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.1, 0.1, M_PI/100.0));
-  SharedDiagonal brNoise = noiseModel::Diagonal::Sigmas(Vector_(2, M_PI/100.0, 0.1));
+  SharedDiagonal odoNoise = noiseModel::Diagonal::Sigmas((Vec(3) << 0.1, 0.1, M_PI/100.0));
+  SharedDiagonal brNoise = noiseModel::Diagonal::Sigmas((Vec(2) << M_PI/100.0, 0.1));
   size_t i = 0;

View File

@@ -30,18 +30,18 @@ boost::tuple<NonlinearFactorGraph, Values> generateProblem() {
   // 2a. Add Gaussian prior
   Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
-  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1));
+  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas((Vec(3) << 0.3, 0.3, 0.1));
   graph += PriorFactor<Pose2>(1, priorMean, priorNoise);
   // 2b. Add odometry factors
-  SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1));
+  SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas((Vec(3) << 0.2, 0.2, 0.1));
   graph += BetweenFactor<Pose2>(1, 2, Pose2(2.0, 0.0, 0.0 ), odometryNoise);
   graph += BetweenFactor<Pose2>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
   graph += BetweenFactor<Pose2>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
   graph += BetweenFactor<Pose2>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
   // 2c. Add pose constraint
-  SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1));
+  SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas((Vec(3) << 0.2, 0.2, 0.1));
   graph += BetweenFactor<Pose2>(5, 2, Pose2(2.0, 0.0, M_PI_2), constraintUncertainty);
   // 3. Create the data structure to hold the initialEstimate estimate to the solution

View File

@@ -64,7 +64,7 @@ TEST( Iterative, conjugateGradientDescent )
   Vector b;
   Vector x0 = gtsam::zero(6);
   boost::tie(A, b) = fg.jacobian();
-  Vector expectedX = Vector_(6, -0.1, 0.1, -0.1, -0.1, 0.1, -0.2);
+  Vector expectedX = (Vec(6) << -0.1, 0.1, -0.1, -0.1, 0.1, -0.2);
   // Do conjugate gradient descent, System version
   System Ab(A, b);
@@ -105,7 +105,7 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint )
   VectorValues expected;
   expected.insert(X(1), zero(3));
-  expected.insert(X(2), Vector_(3,-0.5,0.,0.));
+  expected.insert(X(2), (Vec(3) << -0.5,0.,0.));
   CHECK(assert_equal(expected, actual));
 }
@@ -132,7 +132,7 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
   VectorValues expected;
   expected.insert(X(1), zero(3));
-  expected.insert(X(2), Vector_(3,-0.5,0.,0.));
+  expected.insert(X(2), (Vec(3) << -0.5,0.,0.));
   CHECK(assert_equal(expected, actual));
 }

View File

@@ -51,13 +51,13 @@ TEST(Marginals, planarSLAMmarginals) {
   /* add prior */
   // gaussian for prior
-  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.3, 0.3, 0.1));
+  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas((Vec(3) << 0.3, 0.3, 0.1));
   Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
   graph += PriorFactor<Pose2>(x1, priorMean, priorNoise); // add the factor to the graph
   /* add odometry */
   // general noisemodel for odometry
-  SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.2, 0.2, 0.1));
+  SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas((Vec(3) << 0.2, 0.2, 0.1));
   Pose2 odometry(2.0, 0.0, 0.0); // create a measurement for both factors (the same in this case)
   // create between factors to represent odometry
   graph += BetweenFactor<Pose2>(x1, x2, odometry, odometryNoise);
@@ -65,7 +65,7 @@ TEST(Marginals, planarSLAMmarginals) {
   /* add measurements */
   // general noisemodel for measurements
-  SharedDiagonal measurementNoise = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.2));
+  SharedDiagonal measurementNoise = noiseModel::Diagonal::Sigmas((Vec(2) << 0.1, 0.2));
   // create the measurement values - indices are (pose id, landmark id)
   Rot2 bearing11 = Rot2::fromDegrees(45),

View File

@@ -166,7 +166,7 @@ TEST ( NonlinearEquality, allow_error_pose ) {
   // the unwhitened error should provide logmap to the feasible state
   Pose2 badPoint1(0.0, 2.0, 3.0);
   Vector actVec = nle.evaluateError(badPoint1);
-  Vector expVec = Vector_(3, -0.989992, -0.14112, 0.0);
+  Vector expVec = (Vec(3) << -0.989992, -0.14112, 0.0);
   EXPECT(assert_equal(expVec, actVec, 1e-5));
   // the actual error should have a gain on it
@@ -267,8 +267,8 @@ TEST( testNonlinearEqualityConstraint, unary_basics ) {
   Point2 ptBad1(2.0, 2.0);
   config2.insert(key, ptBad1);
   EXPECT(constraint.active(config2));
-  EXPECT(assert_equal(Vector_(2, 1.0, 0.0), constraint.evaluateError(ptBad1), tol));
-  EXPECT(assert_equal(Vector_(2, 1.0, 0.0), constraint.unwhitenedError(config2), tol));
+  EXPECT(assert_equal((Vec(2) << 1.0, 0.0), constraint.evaluateError(ptBad1), tol));
+  EXPECT(assert_equal((Vec(2) << 1.0, 0.0), constraint.unwhitenedError(config2), tol));
   EXPECT_DOUBLES_EQUAL(500.0, constraint.error(config2), tol);
 }
@@ -289,7 +289,7 @@ TEST( testNonlinearEqualityConstraint, unary_linearization ) {
   Point2 ptBad(2.0, 2.0);
   config2.insert(key, ptBad);
   GaussianFactor::shared_ptr actual2 = constraint.linearize(config2);
-  GaussianFactor::shared_ptr expected2(new JacobianFactor(key, eye(2,2), Vector_(2,-1.0,0.0), hard_model));
+  GaussianFactor::shared_ptr expected2(new JacobianFactor(key, eye(2,2), (Vec(2) << -1.0,0.0), hard_model));
   EXPECT(assert_equal(*expected2, *actual2, tol));
 }
@@ -347,8 +347,8 @@ TEST( testNonlinearEqualityConstraint, odo_basics ) {
   config2.insert(key1, x1bad);
   config2.insert(key2, x2bad);
   EXPECT(constraint.active(config2));
-  EXPECT(assert_equal(Vector_(2, -1.0, -1.0), constraint.evaluateError(x1bad, x2bad), tol));
-  EXPECT(assert_equal(Vector_(2, -1.0, -1.0), constraint.unwhitenedError(config2), tol));
+  EXPECT(assert_equal((Vec(2) << -1.0, -1.0), constraint.evaluateError(x1bad, x2bad), tol));
+  EXPECT(assert_equal((Vec(2) << -1.0, -1.0), constraint.unwhitenedError(config2), tol));
   EXPECT_DOUBLES_EQUAL(1000.0, constraint.error(config2), tol);
 }
@@ -376,7 +376,7 @@ TEST( testNonlinearEqualityConstraint, odo_linearization ) {
   GaussianFactor::shared_ptr actual2 = constraint.linearize(config2);
   GaussianFactor::shared_ptr expected2(
       new JacobianFactor(key1, -eye(2,2), key2,
-          eye(2,2), Vector_(2, 1.0, 1.0), hard_model));
+          eye(2,2), (Vec(2) << 1.0, 1.0), hard_model));
   EXPECT(assert_equal(*expected2, *actual2, tol));
 }

View File

@@ -197,7 +197,7 @@ TEST( NonlinearFactor, size )
 /* ************************************************************************* */
 TEST( NonlinearFactor, linearize_constraint1 )
 {
-  Vector sigmas = Vector_(2, 0.2, 0.0);
+  Vector sigmas = (Vec(2) << 0.2, 0.0);
   SharedDiagonal constraint = noiseModel::Constrained::MixedSigmas(sigmas);
   Point2 mu(1., -1.);
@@ -208,16 +208,16 @@ TEST( NonlinearFactor, linearize_constraint1 )
   GaussianFactor::shared_ptr actual = f0->linearize(config);
   // create expected
-  Vector b = Vector_(2, 0., -3.);
+  Vector b = (Vec(2) << 0., -3.);
   JacobianFactor expected(X(1), Matrix_(2,2, 5.0, 0.0, 0.0, 1.0), b,
-      noiseModel::Constrained::MixedSigmas(Vector_(2, 1.0, 0.0)));
+      noiseModel::Constrained::MixedSigmas((Vec(2) << 1.0, 0.0)));
   CHECK(assert_equal((const GaussianFactor&)expected, *actual));
 }
 /* ************************************************************************* */
 TEST( NonlinearFactor, linearize_constraint2 )
 {
-  Vector sigmas = Vector_(2, 0.2, 0.0);
+  Vector sigmas = (Vec(2) << 0.2, 0.0);
   SharedDiagonal constraint = noiseModel::Constrained::MixedSigmas(sigmas);
   Point2 z3(1.,-1.);
@@ -230,9 +230,9 @@ TEST( NonlinearFactor, linearize_constraint2 )
   // create expected
   Matrix A = Matrix_(2,2, 5.0, 0.0, 0.0, 1.0);
-  Vector b = Vector_(2, -15., -3.);
+  Vector b = (Vec(2) << -15., -3.);
   JacobianFactor expected(X(1), -1*A, L(1), A, b,
-      noiseModel::Constrained::MixedSigmas(Vector_(2, 1.0, 0.0)));
+      noiseModel::Constrained::MixedSigmas((Vec(2) << 1.0, 0.0)));
   CHECK(assert_equal((const GaussianFactor&)expected, *actual));
 }
@@ -240,7 +240,7 @@ TEST( NonlinearFactor, linearize_constraint2 )
 class TestFactor4 : public NoiseModelFactor4<LieVector, LieVector, LieVector, LieVector> {
 public:
   typedef NoiseModelFactor4<LieVector, LieVector, LieVector, LieVector> Base;
-  TestFactor4() : Base(noiseModel::Diagonal::Sigmas(Vector_(1, 2.0)), X(1), X(2), X(3), X(4)) {}
+  TestFactor4() : Base(noiseModel::Diagonal::Sigmas((Vec(1) << 2.0)), X(1), X(2), X(3), X(4)) {}
   virtual Vector
   evaluateError(const LieVector& x1, const LieVector& x2, const LieVector& x3, const LieVector& x4,
@@ -270,7 +270,7 @@ TEST(NonlinearFactor, NoiseModelFactor4) {
   tv.insert(X(2), LieVector(1, 2.0));
   tv.insert(X(3), LieVector(1, 3.0));
   tv.insert(X(4), LieVector(1, 4.0));
-  EXPECT(assert_equal(Vector_(1, 10.0), tf.unwhitenedError(tv)));
+  EXPECT(assert_equal((Vec(1) << 10.0), tf.unwhitenedError(tv)));
   DOUBLES_EQUAL(25.0/2.0, tf.error(tv), 1e-9);
   JacobianFactor jf(*boost::dynamic_pointer_cast<JacobianFactor>(tf.linearize(tv)));
   LONGS_EQUAL((long)X(1), (long)jf.keys()[0]);
@@ -281,14 +281,14 @@ TEST(NonlinearFactor, NoiseModelFactor4) {
   EXPECT(assert_equal(Matrix_(1,1, 1.0), jf.getA(jf.begin()+1)));
   EXPECT(assert_equal(Matrix_(1,1, 1.5), jf.getA(jf.begin()+2)));
   EXPECT(assert_equal(Matrix_(1,1, 2.0), jf.getA(jf.begin()+3)));
-  EXPECT(assert_equal(Vector_(1, -5.0), jf.getb()));
+  EXPECT(assert_equal((Vector)(Vec(1) << -5.0), jf.getb()));
 }
 /* ************************************************************************* */
 class TestFactor5 : public NoiseModelFactor5<LieVector, LieVector, LieVector, LieVector, LieVector> {
 public:
   typedef NoiseModelFactor5<LieVector, LieVector, LieVector, LieVector, LieVector> Base;
-  TestFactor5() : Base(noiseModel::Diagonal::Sigmas(Vector_(1, 2.0)), X(1), X(2), X(3), X(4), X(5)) {}
+  TestFactor5() : Base(noiseModel::Diagonal::Sigmas((Vec(1) << 2.0)), X(1), X(2), X(3), X(4), X(5)) {}
   virtual Vector
   evaluateError(const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5,
@@ -317,7 +317,7 @@ TEST(NonlinearFactor, NoiseModelFactor5) {
   tv.insert(X(3), LieVector(1, 3.0));
   tv.insert(X(4), LieVector(1, 4.0));
   tv.insert(X(5), LieVector(1, 5.0));
-  EXPECT(assert_equal(Vector_(1, 15.0), tf.unwhitenedError(tv)));
+  EXPECT(assert_equal((Vec(1) << 15.0), tf.unwhitenedError(tv)));
   DOUBLES_EQUAL(56.25/2.0, tf.error(tv), 1e-9);
   JacobianFactor jf(*boost::dynamic_pointer_cast<JacobianFactor>(tf.linearize(tv)));
   LONGS_EQUAL((long)X(1), (long)jf.keys()[0]);
@@ -330,14 +330,14 @@ TEST(NonlinearFactor, NoiseModelFactor5) {
   EXPECT(assert_equal(Matrix_(1,1, 1.5), jf.getA(jf.begin()+2)));
   EXPECT(assert_equal(Matrix_(1,1, 2.0), jf.getA(jf.begin()+3)));
   EXPECT(assert_equal(Matrix_(1,1, 2.5), jf.getA(jf.begin()+4)));
-  EXPECT(assert_equal(Vector_(1, -7.5), jf.getb()));
+  EXPECT(assert_equal((Vector)(Vec(1) << -7.5), jf.getb()));
 }
 /* ************************************************************************* */
 class TestFactor6 : public NoiseModelFactor6<LieVector, LieVector, LieVector, LieVector, LieVector, LieVector> {
 public:
   typedef NoiseModelFactor6<LieVector, LieVector, LieVector, LieVector, LieVector, LieVector> Base;
-  TestFactor6() : Base(noiseModel::Diagonal::Sigmas(Vector_(1, 2.0)), X(1), X(2), X(3), X(4), X(5), X(6)) {}
+  TestFactor6() : Base(noiseModel::Diagonal::Sigmas((Vec(1) << 2.0)), X(1), X(2), X(3), X(4), X(5), X(6)) {}
   virtual Vector
   evaluateError(const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6,
@@ -370,7 +370,7 @@ TEST(NonlinearFactor, NoiseModelFactor6) {
   tv.insert(X(4), LieVector(1, 4.0));
   tv.insert(X(5), LieVector(1, 5.0));
   tv.insert(X(6), LieVector(1, 6.0));
-  EXPECT(assert_equal(Vector_(1, 21.0), tf.unwhitenedError(tv)));
+  EXPECT(assert_equal((Vec(1) << 21.0), tf.unwhitenedError(tv)));
   DOUBLES_EQUAL(110.25/2.0, tf.error(tv), 1e-9);
   JacobianFactor jf(*boost::dynamic_pointer_cast<JacobianFactor>(tf.linearize(tv)));
   LONGS_EQUAL((long)X(1), (long)jf.keys()[0]);
@@ -385,7 +385,7 @@ TEST(NonlinearFactor, NoiseModelFactor6) {
   EXPECT(assert_equal(Matrix_(1,1, 2.0), jf.getA(jf.begin()+3)));
   EXPECT(assert_equal(Matrix_(1,1, 2.5), jf.getA(jf.begin()+4)));
   EXPECT(assert_equal(Matrix_(1,1, 3.0), jf.getA(jf.begin()+5)));
-  EXPECT(assert_equal(Vector_(1, -10.5), jf.getb()));
+  EXPECT(assert_equal((Vector)(Vec(1) << -10.5), jf.getb()));
 }

View File

@@ -27,7 +27,7 @@ TEST(testNonlinearISAM, markov_chain ) {
   NonlinearISAM isamChol(reorder_interval, EliminatePreferCholesky); // create an ISAM object
   NonlinearISAM isamQR(reorder_interval, EliminateQR); // create an ISAM object
-  SharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector_(3, 3.0, 3.0, 0.5));
+  SharedDiagonal model = noiseModel::Diagonal::Sigmas((Vec(3) << 3.0, 3.0, 0.5));
   Sampler sampler(model, 42u);
   // create initial graph
@@ -74,8 +74,8 @@ TEST(testNonlinearISAM, markov_chain_with_disconnects ) {
   NonlinearISAM isamChol(reorder_interval, EliminatePreferCholesky); // create an ISAM object
   NonlinearISAM isamQR(reorder_interval, EliminateQR); // create an ISAM object
-  SharedDiagonal model3 = noiseModel::Diagonal::Sigmas(Vector_(3, 3.0, 3.0, 0.5));
-  SharedDiagonal model2 = noiseModel::Diagonal::Sigmas(Vector_(2, 2.0, 2.0));
+  SharedDiagonal model3 = noiseModel::Diagonal::Sigmas((Vec(3) << 3.0, 3.0, 0.5));
+  SharedDiagonal model2 = noiseModel::Diagonal::Sigmas((Vec(2) << 2.0, 2.0));
   Sampler sampler(model3, 42u);
   // create initial graph
@@ -151,8 +151,8 @@ TEST(testNonlinearISAM, markov_chain_with_reconnect ) {
   NonlinearISAM isamChol(reorder_interval, EliminatePreferCholesky); // create an ISAM object
   NonlinearISAM isamQR(reorder_interval, EliminateQR); // create an ISAM object
-  SharedDiagonal model3 = noiseModel::Diagonal::Sigmas(Vector_(3, 3.0, 3.0, 0.5));
-  SharedDiagonal model2 = noiseModel::Diagonal::Sigmas(Vector_(2, 2.0, 2.0));
+  SharedDiagonal model3 = noiseModel::Diagonal::Sigmas((Vec(3) << 3.0, 3.0, 0.5));
+  SharedDiagonal model2 = noiseModel::Diagonal::Sigmas((Vec(2) << 2.0, 2.0));
   Sampler sampler(model3, 42u);
   // create initial graph

View File

@@ -131,12 +131,12 @@ TEST( SubgraphPreconditioner, system )
   // y1 = perturbed y0
   VectorValues y1 = zeros;
-  y1[1] = Vector_(2, 1.0, -1.0);
+  y1[1] = (Vec(2) << 1.0, -1.0);
   // Check corresponding x values
   VectorValues expected_x1 = xtrue, x1 = system.x(y1);
-  expected_x1[1] = Vector_(2, 2.01, 2.99);
-  expected_x1[0] = Vector_(2, 3.01, 2.99);
+  expected_x1[1] = (Vec(2) << 2.01, 2.99);
+  expected_x1[0] = (Vec(2) << 3.01, 2.99);
   CHECK(assert_equal(xtrue, system.x(y0)));
   CHECK(assert_equal(expected_x1,system.x(y1)));
@@ -150,28 +150,28 @@ TEST( SubgraphPreconditioner, system )
   VectorValues expected_gx0 = zeros;
   VectorValues expected_gx1 = zeros;
   CHECK(assert_equal(expected_gx0,gradient(Ab,xtrue)));
-  expected_gx1[2] = Vector_(2, -100., 100.);
-  expected_gx1[4] = Vector_(2, -100., 100.);
-  expected_gx1[1] = Vector_(2, 200., -200.);
-  expected_gx1[3] = Vector_(2, -100., 100.);
-  expected_gx1[0] = Vector_(2, 100., -100.);
+  expected_gx1[2] = (Vec(2) << -100., 100.);
+  expected_gx1[4] = (Vec(2) << -100., 100.);
+  expected_gx1[1] = (Vec(2) << 200., -200.);
+  expected_gx1[3] = (Vec(2) << -100., 100.);
+  expected_gx1[0] = (Vec(2) << 100., -100.);
   CHECK(assert_equal(expected_gx1,gradient(Ab,x1)));
   // Test gradient in y
   VectorValues expected_gy0 = zeros;
   VectorValues expected_gy1 = zeros;
-  expected_gy1[2] = Vector_(2, 2., -2.);
-  expected_gy1[4] = Vector_(2, -2., 2.);
-  expected_gy1[1] = Vector_(2, 3., -3.);
-  expected_gy1[3] = Vector_(2, -1., 1.);
-  expected_gy1[0] = Vector_(2, 1., -1.);
+  expected_gy1[2] = (Vec(2) << 2., -2.);
+  expected_gy1[4] = (Vec(2) << -2., 2.);
+  expected_gy1[1] = (Vec(2) << 3., -3.);
+  expected_gy1[3] = (Vec(2) << -1., 1.);
+  expected_gy1[0] = (Vec(2) << 1., -1.);
   CHECK(assert_equal(expected_gy0,gradient(system,y0)));
   CHECK(assert_equal(expected_gy1,gradient(system,y1)));
   // Check it numerically for good measure
   // TODO use boost::bind(&SubgraphPreconditioner::error,&system,_1)
   // Vector numerical_g1 = numericalGradient<VectorValues> (error, y1, 0.001);
-  // Vector expected_g1 = Vector_(18, 0., 0., 0., 0., 2., -2., 0., 0., -2., 2.,
+  // Vector expected_g1 = (Vec(18) << 0., 0., 0., 0., 2., -2., 0., 0., -2., 2.,
   //     3., -3., 0., 0., -1., 1., 1., -1.);
   // CHECK(assert_equal(expected_g1,numerical_g1));
 }
@@ -204,7 +204,7 @@ TEST( SubgraphPreconditioner, conjugateGradients )
   VectorValues y0 = VectorValues::Zero(xbar);
   VectorValues y1 = y0;
-  y1[1] = Vector_(2, 1.0, -1.0);
+  y1[1] = (Vec(2) << 1.0, -1.0);
   VectorValues x1 = system.x(y1);
   // Solve for the remaining constraints using PCG