diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp
index 49ad4b758..d3da5d270 100644
--- a/tests/testNonlinearOptimizer.cpp
+++ b/tests/testNonlinearOptimizer.cpp
@@ -314,8 +314,8 @@ TEST(NonlinearOptimizer, MoreOptimization) {
   EXPECT(assert_equal(expectedGradient,actualGradient));
 
   // Check errors at convergence and errors in direction of gradient (decreases!)
-  EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual),1e-5);
-  EXPECT_DOUBLES_EQUAL(44.7490532,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
+  EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
+  EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
 
   // Check that solve yields gradient (it's not equal to gradient, as predicted)
   VectorValues delta = damped.optimize();
@@ -324,7 +324,7 @@ TEST(NonlinearOptimizer, MoreOptimization) {
   EXPECT(assert_equal(actualGradient,factor*delta));
 
   // Still pointing downhill wrt actual gradient !
-  EXPECT_DOUBLES_EQUAL( 0.0105584,dot(-1*actualGradient,delta),1e-5);
+  EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
 
   // delta.print("This is the delta value computed by LM with diagonal damping");
 
@@ -335,7 +335,7 @@ TEST(NonlinearOptimizer, MoreOptimization) {
   EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
 
   // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
-  EXPECT_DOUBLES_EQUAL(46.02549021,fg.error(actual.retract(0.01*delta)),1e-5);
+  EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
 }
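
The assertions being retuned above check a geometric property of the damped Levenberg-Marquardt step: the step delta should still point downhill with respect to the gradient (dot(-gradient, delta) > 0), and the error should decrease for a small move along the negative gradient. The following is a minimal, self-contained sketch of that same property on a toy quadratic error; it does not use GTSAM, and the quadratic and the scaling factors are illustrative stand-ins for fg.error() and the diagonally damped solve in the test.

// Plain C++ sketch (no GTSAM): a damped step that scales the gradient
// componentwise by positive factors is still a descent direction.
#include <cassert>

int main() {
  // Toy error e(x, y) = 0.5*(x*x + 10*y*y); its gradient is g = (x, 10*y).
  const double x = 3.0, y = 1.0;
  const double g[2] = {x, 10.0 * y};
  auto error = [](double a, double b) { return 0.5 * (a * a + 10.0 * b * b); };

  // Hypothetical damped step: negative gradient, rescaled per component
  // (stand-in for the diagonally damped LM solve in the test).
  const double delta[2] = {-0.5 * g[0], -0.05 * g[1]};

  // Still pointing downhill wrt the gradient: dot(-g, delta) > 0.
  const double descent = -(g[0] * delta[0] + g[1] * delta[1]);
  assert(descent > 0.0);

  // A small step along delta decreases the error.
  const double step = 0.01;
  assert(error(x + step * delta[0], y + step * delta[1]) < error(x, y));
  return 0;
}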