Plot thickens: delta computed from damped system points downhill but non-linear error does not decrease
parent 33ae83fcb2
commit 541dbd5199
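A minimal sketch of why the delta computed from the damped system always points downhill on the linearized model (assuming the usual Levenberg-Marquardt damped normal equations, with Jacobian A, right-hand side b and damping lambda; this notation is not from the commit itself):

\[
\bigl(A^\top A + \lambda\,\operatorname{diag}(A^\top A)\bigr)\,\delta = A^\top b = -g,
\qquad
\langle -g,\,\delta\rangle = g^\top \bigl(A^\top A + \lambda\,\operatorname{diag}(A^\top A)\bigr)^{-1} g > 0
\quad\text{for } \lambda > 0,
\]

because the damped matrix is positive definite. This is the property asserted by the new dot(-1*actualGradient,delta) check below, and it also matches the expectedDiagonal = d + params.lambdaInitial * d check, since diagonal damping scales the Hessian diagonal d to (1 + lambda) d. Descent of the linearized model, however, guarantees nothing about fg.error(actual.retract(delta)): the non-linear error can still fail to decrease, which is exactly what the commit title reports and the tests below document.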
@@ -264,8 +264,8 @@ TEST(NonlinearOptimizer, MoreOptimization) {
   // params.setVerbosityLM("TRYDELTA");
   // params.setVerbosity("TERMINATION");
   params.setlambdaUpperBound(1e9);
-  params.setRelativeErrorTol(0);
-  params.setAbsoluteErrorTol(0);
+  // params.setRelativeErrorTol(0);
+  // params.setAbsoluteErrorTol(0);
   //params.setlambdaInitial(10);

   {
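As a side note on the tolerance settings touched in this hunk, here is a minimal hedged sketch that reuses only the setters already visible above; the reading of the author's intent in the comments is an assumption, not something stated in the commit:

// Sketch only; the values are the ones appearing in the hunk above.
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <iostream>

int main() {
  gtsam::LevenbergMarquardtParams params;
  params.setlambdaUpperBound(1e9);  // let lambda grow very large before LM gives up
  params.setRelativeErrorTol(0);    // presumably to keep LM from declaring convergence early
  params.setAbsoluteErrorTol(0);    // same presumed intent, for the absolute-error criterion
  std::cout << "LM params configured for the debugging run in the hunk above" << std::endl;
  return 0;
}

Commenting the two tolerance calls back out, as this hunk does, restores the default stopping behaviour.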
|
@@ -281,7 +281,7 @@ TEST(NonlinearOptimizer, MoreOptimization) {
   }
   EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));

-  cout << "===================================================================================" << endl;
+  // cout << "===================================================================================" << endl;

   // Try LM with diagonal damping
   Values initBetter = init;
@@ -300,13 +300,34 @@ TEST(NonlinearOptimizer, MoreOptimization) {
     expectedDiagonal = d + params.lambdaInitial * d;
     EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));

-    // test convergence
+    // test convergence (does not!)
     Values actual = optimizer.optimize();
     EXPECT(assert_equal(expected, actual));

-    // Check that the gradient is zero
+    // Check that the gradient is zero (it is not!)
     linear = optimizer.linearize();
     EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
+
+    // Check that the gradient is zero for damped system (it is not!)
+    damped = optimizer.buildDampedSystem(*linear);
+    VectorValues actualGradient = damped.gradientAtZero();
+    EXPECT(assert_equal(expectedGradient,actualGradient));
+
+    // Check errors at convergence and errors in direction of gradient (decreases!)
+    EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual),1e-5);
+    EXPECT_DOUBLES_EQUAL(44.7490532,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
+
+    // Check that solve yields a multiple of the gradient (not the gradient itself, as predicted)
+    VectorValues delta = damped.optimize();
+    // cout << damped.augmentedHessian() << endl;
+    double factor = actualGradient[0][0]/delta[0][0];
+    EXPECT(assert_equal(actualGradient,factor*delta));
+
+    // Still pointing downhill!
+    EXPECT_DOUBLES_EQUAL(0.0105584,dot(-1*actualGradient,delta),1e-5);
+
+    // Check errors at convergence and errors in direction of solution (does not decrease!)
+    EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
   }
 }
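To see the behaviour in the commit title outside of GTSAM, here is a small self-contained sketch (purely illustrative, not from the commit; the scalar problem r(x) = atan(x) and every name in it are made up) in which the damped Gauss-Newton step satisfies dot(-g, delta) > 0 and yet the non-linear error increases after the step is applied:

#include <cmath>
#include <iostream>

int main() {
  const double x0 = 3.0;                   // current estimate
  const double r = std::atan(x0);          // residual r(x) = atan(x); error is 0.5*r^2
  const double J = 1.0 / (1.0 + x0 * x0);  // Jacobian dr/dx at x0
  const double g = J * r;                  // gradient of 0.5*r(x)^2 at x0
  const double lambda = 1e-5;              // small LM-style damping factor

  // Damped normal equations in the scalar case: (J*J + lambda*J*J) * delta = -g
  const double delta = -g / (J * J * (1.0 + lambda));

  const double errorBefore = 0.5 * r * r;
  const double rNew = std::atan(x0 + delta);
  const double errorAfter = 0.5 * rNew * rNew;

  std::cout << "dot(-g, delta) = " << -g * delta << "\n"   // positive: delta points downhill
            << "error before   = " << errorBefore << "\n"
            << "error after    = " << errorAfter << "\n";  // larger: no decrease
  return 0;
}

The mismatch is the same one the test documents: the step is a descent direction for the linearized error, but the linearization is only trusted locally, so the error evaluated at the updated (retracted) point can still go up, while a much shorter step along the same direction does decrease it, as the fg.error(actual.retract(-0.01*actualGradient)) check in the hunk suggests.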