improved efficiency of diagonal damping (avoiding multiple traversals of the binary tree)

release/4.3a0
Luca 2014-03-13 22:43:51 -04:00
parent afb5bac2f7
commit a6d73e8884
2 changed files with 156 additions and 110 deletions
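Note: the diff below restructures LevenbergMarquardtOptimizer::buildDampedSystem so that, when diagonal damping is enabled, the damping priors are built in a single pass over the cached Hessian diagonal instead of looking up the diagonal block for every key while iterating the tree-backed Values container. A minimal sketch of that one-pass idea, with std::map standing in for the GTSAM containers (DampingPrior and buildDiagonalDamping are hypothetical names, not GTSAM API):

// Minimal sketch only (not GTSAM code): build the damping priors in one pass
// over the Hessian diagonal, so no per-key lookup into a second tree-backed
// container is needed.
#include <cmath>
#include <cstddef>
#include <map>
#include <vector>

using Key = std::size_t;
using DiagBlock = std::vector<double>;  // per-variable block of the Hessian diagonal

struct DampingPrior {     // stand-in for the per-key JacobianFactor prior
  Key key;
  DiagBlock a_diagonal;   // diagonal of the damping matrix A
  double sigma;           // isotropic noise sigma = 1 / sqrt(lambda)
};

std::vector<DampingPrior> buildDiagonalDamping(
    const std::map<Key, DiagBlock>& hessianDiagonal, double lambda) {
  const double sigma = 1.0 / std::sqrt(lambda);
  std::vector<DampingPrior> priors;
  priors.reserve(hessianDiagonal.size());
  for (const auto& kv : hessianDiagonal)   // single traversal of the container
    priors.push_back(DampingPrior{kv.first, kv.second, sigma});
  return priors;
}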

View File

@@ -31,37 +31,61 @@
 using namespace std;
 
 namespace gtsam {
 
 using boost::adaptors::map_values;
 
 /* ************************************************************************* */
-LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTranslator(const std::string &src) const {
-  std::string s = src; boost::algorithm::to_upper(s);
-  if (s == "SILENT") return LevenbergMarquardtParams::SILENT;
-  if (s == "LAMBDA") return LevenbergMarquardtParams::LAMBDA;
-  if (s == "TRYLAMBDA") return LevenbergMarquardtParams::TRYLAMBDA;
-  if (s == "TRYCONFIG") return LevenbergMarquardtParams::TRYCONFIG;
-  if (s == "TRYDELTA") return LevenbergMarquardtParams::TRYDELTA;
-  if (s == "DAMPED") return LevenbergMarquardtParams::DAMPED;
+LevenbergMarquardtParams::VerbosityLM LevenbergMarquardtParams::verbosityLMTranslator(
+    const std::string &src) const {
+  std::string s = src;
+  boost::algorithm::to_upper(s);
+  if (s == "SILENT")
+    return LevenbergMarquardtParams::SILENT;
+  if (s == "LAMBDA")
+    return LevenbergMarquardtParams::LAMBDA;
+  if (s == "TRYLAMBDA")
+    return LevenbergMarquardtParams::TRYLAMBDA;
+  if (s == "TRYCONFIG")
+    return LevenbergMarquardtParams::TRYCONFIG;
+  if (s == "TRYDELTA")
+    return LevenbergMarquardtParams::TRYDELTA;
+  if (s == "DAMPED")
+    return LevenbergMarquardtParams::DAMPED;
   /* default is silent */
   return LevenbergMarquardtParams::SILENT;
 }
 
 /* ************************************************************************* */
-std::string LevenbergMarquardtParams::verbosityLMTranslator(VerbosityLM value) const {
+std::string LevenbergMarquardtParams::verbosityLMTranslator(
+    VerbosityLM value) const {
   std::string s;
   switch (value) {
-  case LevenbergMarquardtParams::SILENT: s = "SILENT" ; break;
-  case LevenbergMarquardtParams::TERMINATION: s = "TERMINATION" ; break;
-  case LevenbergMarquardtParams::LAMBDA: s = "LAMBDA" ; break;
-  case LevenbergMarquardtParams::TRYLAMBDA: s = "TRYLAMBDA" ; break;
-  case LevenbergMarquardtParams::TRYCONFIG: s = "TRYCONFIG" ; break;
-  case LevenbergMarquardtParams::TRYDELTA: s = "TRYDELTA" ; break;
-  case LevenbergMarquardtParams::DAMPED: s = "DAMPED" ; break;
-  default: s = "UNDEFINED" ; break;
+  case LevenbergMarquardtParams::SILENT:
+    s = "SILENT";
+    break;
+  case LevenbergMarquardtParams::TERMINATION:
+    s = "TERMINATION";
+    break;
+  case LevenbergMarquardtParams::LAMBDA:
+    s = "LAMBDA";
+    break;
+  case LevenbergMarquardtParams::TRYLAMBDA:
+    s = "TRYLAMBDA";
+    break;
+  case LevenbergMarquardtParams::TRYCONFIG:
+    s = "TRYCONFIG";
+    break;
+  case LevenbergMarquardtParams::TRYDELTA:
+    s = "TRYDELTA";
+    break;
+  case LevenbergMarquardtParams::DAMPED:
+    s = "DAMPED";
+    break;
+  default:
+    s = "UNDEFINED";
+    break;
   }
   return s;
 }
@@ -73,10 +97,12 @@ void LevenbergMarquardtParams::print(const std::string& str) const {
   std::cout << " lambdaFactor: " << lambdaFactor << "\n";
   std::cout << " lambdaUpperBound: " << lambdaUpperBound << "\n";
   std::cout << " lambdaLowerBound: " << lambdaLowerBound << "\n";
-  std::cout << " disableInnerIterations: " << disableInnerIterations << "\n";
+  std::cout << " disableInnerIterations: " << disableInnerIterations
+      << "\n";
   std::cout << " minModelFidelity: " << minModelFidelity << "\n";
   std::cout << " diagonalDamping: " << diagonalDamping << "\n";
-  std::cout << " verbosityLM: " << verbosityLMTranslator(verbosityLM) << "\n";
+  std::cout << " verbosityLM: "
+      << verbosityLMTranslator(verbosityLM) << "\n";
   std::cout.flush();
 }
@@ -86,10 +112,10 @@ GaussianFactorGraph::shared_ptr LevenbergMarquardtOptimizer::linearize() const {
 }
 
 /* ************************************************************************* */
-void LevenbergMarquardtOptimizer::increaseLambda(){
-  if(params_.useFixedLambdaFactor_){
+void LevenbergMarquardtOptimizer::increaseLambda() {
+  if (params_.useFixedLambdaFactor_) {
     state_.lambda *= params_.lambdaFactor;
-  }else{
+  } else {
     state_.lambda *= params_.lambdaFactor;
     params_.lambdaFactor *= 2.0;
   }
@@ -97,11 +123,11 @@ void LevenbergMarquardtOptimizer::increaseLambda(){
 }
 
 /* ************************************************************************* */
-void LevenbergMarquardtOptimizer::decreaseLambda(double stepQuality){
-  if(params_.useFixedLambdaFactor_){
+void LevenbergMarquardtOptimizer::decreaseLambda(double stepQuality) {
+  if (params_.useFixedLambdaFactor_) {
     state_.lambda /= params_.lambdaFactor;
-  }else{
+  } else {
     // CHECK_GT(step_quality, 0.0);
     state_.lambda *= std::max(1.0 / 3.0, 1.0 - pow(2.0 * stepQuality - 1.0, 3));
     params_.lambdaFactor = 2.0;
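For reference, the two lambda-update policies shown in the two hunks above, written as a standalone sketch (free functions with hypothetical names; the real methods update state_.lambda and params_.lambdaFactor in place):

// Sketch only: mirrors increaseLambda() / decreaseLambda() above.
#include <algorithm>
#include <cmath>

double increasedLambda(double lambda, double& factor, bool useFixedFactor) {
  lambda *= factor;
  if (!useFixedFactor)
    factor *= 2.0;  // back off more aggressively after repeated failed steps
  return lambda;
}

double decreasedLambda(double lambda, double& factor, bool useFixedFactor,
                       double stepQuality) {
  if (useFixedFactor)
    return lambda / factor;
  factor = 2.0;  // reset the adaptive factor after a successful step
  // Marquardt-style update: better step quality gives a larger decrease,
  // but lambda never shrinks by more than a factor of 3 in one step.
  return lambda * std::max(1.0 / 3.0, 1.0 - std::pow(2.0 * stepQuality - 1.0, 3.0));
}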
@@ -138,21 +164,29 @@ GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem(
   double sigma = 1.0 / std::sqrt(state_.lambda);
   GaussianFactorGraph damped = linear;
   damped.reserve(damped.size() + state_.values.size());
-  BOOST_FOREACH(const Values::KeyValuePair& key_value, state_.values) {
-    size_t dim = key_value.value.dim();
-    Matrix A = Matrix::Identity(dim, dim);
-    //Replace the identity matrix with diagonal of Hessian
-    if (params_.diagonalDamping){
+  if (params_.diagonalDamping) {
+    BOOST_FOREACH(const VectorValues::KeyValuePair& key_vector, state_.hessianDiagonal) {
+      // Fill in the diagonal of A with diag(hessian)
       try {
-        A.diagonal() = state_.hessianDiagonal.at(key_value.key);
+        Matrix A = Eigen::DiagonalMatrix<double, Eigen::Dynamic>(state_.hessianDiagonal.at(key_vector.first));
+        size_t dim = key_vector.second.size();
+        Vector b = Vector::Zero(dim);
+        SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
+        damped += boost::make_shared<JacobianFactor>(key_vector.first, A, b, model);
       } catch (std::exception e) {
         // Don't attempt any damping if no key found in diagonal
        continue;
      }
    }
-    Vector b = Vector::Zero(dim);
-    SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
-    damped += boost::make_shared<JacobianFactor>(key_value.key, A, b, model);
+  } else {
+    // Straightforward damping:
+    BOOST_FOREACH(const Values::KeyValuePair& key_value, state_.values) {
+      size_t dim = key_value.value.dim();
+      Matrix A = Matrix::Identity(dim, dim);
+      Vector b = Vector::Zero(dim);
+      SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
+      damped += boost::make_shared<JacobianFactor>(key_value.key, A, b, model);
+    }
   }
   gttoc(damp);
   return damped;
@@ -161,20 +195,22 @@ GaussianFactorGraph LevenbergMarquardtOptimizer::buildDampedSystem(
 /* ************************************************************************* */
 void LevenbergMarquardtOptimizer::iterate() {
 
-  gttic (LM_iterate);
+  gttic(LM_iterate);
 
   // Pull out parameters we'll use
   const NonlinearOptimizerParams::Verbosity nloVerbosity = params_.verbosity;
   const LevenbergMarquardtParams::VerbosityLM lmVerbosity = params_.verbosityLM;
 
   // Linearize graph
-  if(lmVerbosity >= LevenbergMarquardtParams::DAMPED) cout << "linearizing = " << endl;
+  if (lmVerbosity >= LevenbergMarquardtParams::DAMPED)
+    cout << "linearizing = " << endl;
   GaussianFactorGraph::shared_ptr linear = linearize();
 
   // Keep increasing lambda until we make make progress
   while (true) {
 
-    if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "trying lambda = " << state_.lambda << endl;
+    if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
+      cout << "trying lambda = " << state_.lambda << endl;
 
     // Build damped system for this lambda (adds prior factors that make it like gradient descent)
     GaussianFactorGraph dampedSystem = buildDampedSystem(*linear);
@@ -183,9 +219,11 @@ void LevenbergMarquardtOptimizer::iterate() {
     if (!params_.logFile.empty()) {
       ofstream os(params_.logFile.c_str(), ios::app);
-      boost::posix_time::ptime currentTime = boost::posix_time::microsec_clock::universal_time();
-      os << state_.totalNumberInnerIterations << "," << 1e-6 * (currentTime - state_.startTime).total_microseconds() << ","
+      boost::posix_time::ptime currentTime =
+          boost::posix_time::microsec_clock::universal_time();
+      os << state_.totalNumberInnerIterations << ","
+          << 1e-6 * (currentTime - state_.startTime).total_microseconds() << ","
           << state_.error << "," << state_.lambda << endl;
     }
@@ -203,44 +241,48 @@ void LevenbergMarquardtOptimizer::iterate() {
     try {
       delta = solve(dampedSystem, state_.values, params_);
       systemSolvedSuccessfully = true;
-    } catch(IndeterminantLinearSystemException& e) {
+    } catch (IndeterminantLinearSystemException& e) {
       systemSolvedSuccessfully = false;
     }
 
-    if(systemSolvedSuccessfully) {
+    if (systemSolvedSuccessfully) {
       params_.reuse_diagonal_ = true;
 
-      if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "linear delta norm = " << delta.norm() << endl;
-      if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA) delta.print("delta");
+      if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
+        cout << "linear delta norm = " << delta.norm() << endl;
+      if (lmVerbosity >= LevenbergMarquardtParams::TRYDELTA)
+        delta.print("delta");
 
       // cost change in the linearized system (old - new)
       double newlinearizedError = linear->error(delta);
       double linearizedCostChange = state_.error - newlinearizedError;
 
-      if(linearizedCostChange >= 0){ // step is valid
+      if (linearizedCostChange >= 0) { // step is valid
         // update values
-        gttic (retract);
+        gttic(retract);
         newValues = state_.values.retract(delta);
         gttoc(retract);
 
         // compute new error
-        gttic (compute_error);
-        if(lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA) cout << "calculating error" << endl;
+        gttic(compute_error);
+        if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
+          cout << "calculating error" << endl;
         newError = graph_.error(newValues);
         gttoc(compute_error);
 
         // cost change in the original, nonlinear system (old - new)
         double costChange = state_.error - newError;
 
-        double absolute_function_tolerance = params_.relativeErrorTol * state_.error;
+        double absolute_function_tolerance = params_.relativeErrorTol
+            * state_.error;
         if (fabs(costChange) >= absolute_function_tolerance) {
           // fidelity of linearized model VS original system between
-          if(linearizedCostChange > 1e-15){
+          if (linearizedCostChange > 1e-15) {
             modelFidelity = costChange / linearizedCostChange;
             // if we decrease the error in the nonlinear system and modelFidelity is above threshold
             step_is_successful = modelFidelity > params_.minModelFidelity;
-          }else{
+          } else {
             step_is_successful = true; // linearizedCostChange close to zero
           }
         } else {
@@ -249,20 +291,23 @@ void LevenbergMarquardtOptimizer::iterate() {
         }
       }
 
-      if(step_is_successful){ // we have successfully decreased the cost and we have good modelFidelity
+      if (step_is_successful) { // we have successfully decreased the cost and we have good modelFidelity
        state_.values.swap(newValues);
        state_.error = newError;
        decreaseLambda(modelFidelity);
        break;
-      }else if (!stopSearchingLambda){ // we failed to solved the system or we had no decrease in cost
+      } else if (!stopSearchingLambda) { // we failed to solved the system or we had no decrease in cost
         if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
-          cout << "increasing lambda: old error (" << state_.error << ") new error (" << newError << ")" << endl;
+          cout << "increasing lambda: old error (" << state_.error
+              << ") new error (" << newError << ")" << endl;
        increaseLambda();
 
        // check if lambda is too big
-        if(state_.lambda >= params_.lambdaUpperBound) {
-          if(nloVerbosity >= NonlinearOptimizerParams::TERMINATION)
-            cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
+        if (state_.lambda >= params_.lambdaUpperBound) {
+          if (nloVerbosity >= NonlinearOptimizerParams::TERMINATION)
+            cout
+                << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda"
+                << endl;
          break;
        }
      } else { // the change in the cost is very small and it is not worth trying bigger lambdas
@@ -276,9 +321,8 @@ void LevenbergMarquardtOptimizer::iterate() {
 
 /* ************************************************************************* */
 LevenbergMarquardtParams LevenbergMarquardtOptimizer::ensureHasOrdering(
-    LevenbergMarquardtParams params, const NonlinearFactorGraph& graph) const
-{
-  if(!params.ordering)
+    LevenbergMarquardtParams params, const NonlinearFactorGraph& graph) const {
+  if (!params.ordering)
     params.ordering = Ordering::COLAMD(graph);
   return params;
 }
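The step accept/reject logic in the iterate() hunks above amounts to a gain-ratio test. A simplified standalone sketch with hypothetical names (it omits the absolute-function-tolerance check that the real code applies first):

// Sketch only: rho is the "model fidelity", the ratio of the actual nonlinear
// cost decrease to the decrease predicted by the linearized system.
bool stepIsSuccessful(double oldError, double newError,
                      double newLinearizedError, double minModelFidelity) {
  const double costChange = oldError - newError;                       // actual decrease
  const double linearizedCostChange = oldError - newLinearizedError;   // predicted decrease
  if (linearizedCostChange < 0)
    return false;     // linearized cost increased: invalid step
  if (linearizedCostChange <= 1e-15)
    return true;      // predicted change ~zero (the "close to zero" branch above)
  const double rho = costChange / linearizedCostChange;
  return rho > minModelFidelity;
}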

View File

@@ -234,7 +234,7 @@ TEST(NonlinearOptimizer, NullFactor) {
 }
 
 /* ************************************************************************* */
-TEST(NonlinearOptimizer, MoreOptimization) {
+TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) {
 
   NonlinearFactorGraph fg;
   fg += PriorFactor<Pose2>(0, Pose2(0, 0, 0),
@@ -284,57 +284,59 @@ TEST(NonlinearOptimizer, MoreOptimization) {
 //  cout << "===================================================================================" << endl;
 
   // Try LM with diagonal damping
-  Values initBetter = init;
-//  initBetter.insert(0, Pose2(3,4,0));
-//  initBetter.insert(1, Pose2(10,2,M_PI/3));
-//  initBetter.insert(2, Pose2(11,7,M_PI/2));
+  Values initBetter;
+  initBetter.insert(0, Pose2(3,4,0));
+  initBetter.insert(1, Pose2(10,2,M_PI/3));
+  initBetter.insert(2, Pose2(11,7,M_PI/2));
 
   {
-//    params.setDiagonalDamping(true);
-//    LevenbergMarquardtOptimizer optimizer(fg, initBetter, params);
-//
-//    // test the diagonal
-//    GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
-//    GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear);
-//    VectorValues d = linear->hessianDiagonal(), //
-//        expectedDiagonal = d + params.lambdaInitial * d;
-//    EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));
-//
-//    // test convergence (does not!)
-//    Values actual = optimizer.optimize();
-//    EXPECT(assert_equal(expected, actual));
-//
-//    // Check that the gradient is zero (it is not!)
-//    linear = optimizer.linearize();
-//    EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
-//
-//    // Check that the gradient is zero for damped system (it is not!)
-//    damped = optimizer.buildDampedSystem(*linear);
-//    VectorValues actualGradient = damped.gradientAtZero();
-//    EXPECT(assert_equal(expectedGradient,actualGradient));
-//
-//    // Check errors at convergence and errors in direction of gradient (decreases!)
-//    EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
-//    EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
-//
-//    // Check that solve yields gradient (it's not equal to gradient, as predicted)
-//    VectorValues delta = damped.optimize();
-//    double factor = actualGradient[0][0]/delta[0][0];
-//    EXPECT(assert_equal(actualGradient,factor*delta));
-//
-//    // Still pointing downhill wrt actual gradient !
-//    EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
-//
-//    // delta.print("This is the delta value computed by LM with diagonal damping");
-//
-//    // Still pointing downhill wrt expected gradient (IT DOES NOT! actually they are perpendicular)
-//    EXPECT_DOUBLES_EQUAL( 0.0,dot(-1*expectedGradient,delta),1e-5);
-//
-//    // Check errors at convergence and errors in direction of solution (does not decrease!)
-//    EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
-//
-//    // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
-//    EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
+    params.setDiagonalDamping(true);
+    LevenbergMarquardtOptimizer optimizer(fg, initBetter, params);
+
+    // test the diagonal
+    GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
+    GaussianFactorGraph damped = optimizer.buildDampedSystem(*linear);
+    VectorValues d = linear->hessianDiagonal(), //
+        expectedDiagonal = d + params.lambdaInitial * d;
+    EXPECT(assert_equal(expectedDiagonal, damped.hessianDiagonal()));
+
+    // test convergence (does not!)
+    Values actual = optimizer.optimize();
+    EXPECT(assert_equal(expected, actual));
+
+    // Check that the gradient is zero (it is not!)
+    linear = optimizer.linearize();
+    EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
+
+    // Check that the gradient is zero for damped system (it is not!)
+    damped = optimizer.buildDampedSystem(*linear);
+    VectorValues actualGradient = damped.gradientAtZero();
+    EXPECT(assert_equal(expectedGradient,actualGradient));
+
+    /* This block was made to test the original initial guess "init"
+    // Check errors at convergence and errors in direction of gradient (decreases!)
+    EXPECT_DOUBLES_EQUAL(46.02558,fg.error(actual),1e-5);
+    EXPECT_DOUBLES_EQUAL(44.742237,fg.error(actual.retract(-0.01*actualGradient)),1e-5);
+
+    // Check that solve yields gradient (it's not equal to gradient, as predicted)
+    VectorValues delta = damped.optimize();
+    double factor = actualGradient[0][0]/delta[0][0];
+    EXPECT(assert_equal(actualGradient,factor*delta));
+
+    // Still pointing downhill wrt actual gradient !
+    EXPECT_DOUBLES_EQUAL( 0.1056157,dot(-1*actualGradient,delta),1e-3);
+
+    // delta.print("This is the delta value computed by LM with diagonal damping");
+
+    // Still pointing downhill wrt expected gradient (IT DOES NOT! actually they are perpendicular)
+    EXPECT_DOUBLES_EQUAL( 0.0,dot(-1*expectedGradient,delta),1e-5);
+
+    // Check errors at convergence and errors in direction of solution (does not decrease!)
+    EXPECT_DOUBLES_EQUAL(46.0254859,fg.error(actual.retract(delta)),1e-5);
+
+    // Check errors at convergence and errors at a small step in direction of solution (does not decrease!)
+    EXPECT_DOUBLES_EQUAL(46.0255,fg.error(actual.retract(0.01*delta)),1e-3);
+    */
   }
 }