Enabled DogLeg unit test and made DogLeg verbose printing controlled by a flag

release/4.3a0
Richard Roberts 2011-11-06 18:26:47 +00:00
parent 5670e255ca
commit 26dd292872
3 changed files with 15 additions and 16 deletions

DoglegOptimizerImpl.cpp

@@ -9,33 +9,33 @@
 namespace gtsam {
 
 /* ************************************************************************* */
 VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(
-    double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+    double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose) {
 
   // Get magnitude of each update and find out which segment Delta falls in
   assert(Delta >= 0.0);
   double DeltaSq = Delta*Delta;
   double x_u_norm_sq = x_u.vector().squaredNorm();
   double x_n_norm_sq = x_n.vector().squaredNorm();
-  cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
+  if(verbose) cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
 
   if(DeltaSq < x_u_norm_sq) {
     // Trust region is smaller than steepest descent update
     VectorValues x_d = VectorValues::SameStructure(x_u);
     x_d.vector() = x_u.vector() * sqrt(DeltaSq / x_u_norm_sq);
-    cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
+    if(verbose) cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
     return x_d;
   } else if(DeltaSq < x_n_norm_sq) {
     // Trust region boundary is between steepest descent point and Newton's method point
     return ComputeBlend(Delta, x_u, x_n);
   } else {
     assert(DeltaSq >= x_n_norm_sq);
-    cout << "In pure Newton's method region" << endl;
+    if(verbose) cout << "In pure Newton's method region" << endl;
     // Trust region is larger than Newton's method point
     return x_n;
   }
 }
 
 /* ************************************************************************* */
-VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n) {
+VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose) {
 
   // See doc/trustregion.lyx or doc/trustregion.pdf
@@ -64,7 +64,7 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues&
   }
 
   // Compute blended point
-  cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
+  if(verbose) cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
   VectorValues blend = VectorValues::SameStructure(x_u);
   blend.vector() = (1. - tau) * x_u.vector() + tau * x_n.vector();
   return blend;
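
The dogleg point above picks between three regimes: if the trust region radius Delta is smaller than the steepest descent step ||x_u||, that step is truncated to length Delta; if Delta exceeds the Newton step ||x_n||, the full Newton step is taken; otherwise ComputeBlend returns the point of norm Delta on the segment from x_u to x_n. Note that the internal calls do not forward verbose (ComputeDoglegPoint calls ComputeBlend without it), so each function only controls its own printing and the blend printout stays suppressed even in verbose mode. Below is a self-contained sketch of the same geometry on plain Eigen vectors rather than gtsam::VectorValues; the helper name DoglegPoint is mine, and the closed-form tau is the standard dogleg blend, assumed to match the derivation in doc/trustregion.lyx.

#include <Eigen/Dense>
#include <cassert>
#include <cmath>

// Sketch only: same region logic as the diff above, on plain Eigen vectors.
Eigen::VectorXd DoglegPoint(double Delta,
    const Eigen::VectorXd& x_u, const Eigen::VectorXd& x_n) {
  assert(Delta >= 0.0);
  const double DeltaSq = Delta * Delta;
  if(DeltaSq < x_u.squaredNorm()) {
    // Steepest descent region: truncate the gradient step to length Delta
    return x_u * std::sqrt(DeltaSq / x_u.squaredNorm());
  } else if(DeltaSq < x_n.squaredNorm()) {
    // Blend region: find tau in (0,1) with ||(1-tau)*x_u + tau*x_n|| = Delta,
    // i.e. the positive root of a*tau^2 + b*tau + c = 0 with d = x_n - x_u
    const Eigen::VectorXd d = x_n - x_u;
    const double a = d.squaredNorm();
    const double b = 2.0 * x_u.dot(d);
    const double c = x_u.squaredNorm() - DeltaSq;
    const double tau = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
    return (1.0 - tau) * x_u + tau * x_n;
  } else {
    // Newton region: the trust region contains the full Newton step
    return x_n;
  }
}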

DoglegOptimizerImpl.h

@@ -75,7 +75,7 @@ struct DoglegOptimizerImpl {
   template<class M, class F, class VALUES>
   static IterationResult Iterate(
       double Delta, TrustRegionAdaptationMode mode, const M& Rd,
-      const F& f, const VALUES& x0, const Ordering& ordering, double f_error);
+      const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose=false);
 
   /**
    * Compute the dogleg point given a trust region radius \f$ \Delta \f$. The
@@ -98,7 +98,7 @@ struct DoglegOptimizerImpl {
    * @param bayesNet The Bayes' net \f$ (R,d) \f$ as described above.
    * @return The dogleg point \f$ \delta x_d \f$
    */
-  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+  static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 
   /** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
    * the function
@@ -132,7 +132,7 @@ struct DoglegOptimizerImpl {
    * @param xu Steepest descent minimizer
    * @param xn Newton's method minimizer
    */
-  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n);
+  static VectorValues ComputeBlend(double Delta, const VectorValues& x_u, const VectorValues& x_n, const bool verbose=false);
 };
@@ -140,7 +140,7 @@ struct DoglegOptimizerImpl {
 template<class M, class F, class VALUES>
 typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
     double Delta, TrustRegionAdaptationMode mode, const M& Rd,
-    const F& f, const VALUES& x0, const Ordering& ordering, double f_error) {
+    const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose) {
 
   // Compute steepest descent and Newton's method points
   VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
@@ -156,7 +156,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
 
   // Compute dog leg point
   result.dx_d = ComputeDoglegPoint(Delta, dx_u, dx_n);
-  cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
+  if(verbose) cout << "Delta = " << Delta << ", dx_d_norm = " << result.dx_d.vector().norm() << endl;
 
   // Compute expmapped solution
   const VALUES x_d(x0.expmap(result.dx_d, ordering));
@@ -167,8 +167,8 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
 
   // Compute decrease in M
   const double new_M_error = jfg.error(result.dx_d);
-  cout << "f error: " << f_error << " -> " << result.f_error << endl;
-  cout << "M error: " << M_error << " -> " << new_M_error << endl;
+  if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
+  if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
 
   // Compute gain ratio. Here we take advantage of the invariant that the
   // Bayes' net error at zero is equal to the nonlinear error
@@ -176,7 +176,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
       0.5 :
       (f_error - result.f_error) / (M_error - new_M_error);
-  cout << "rho = " << rho << endl;
+  if(verbose) cout << "rho = " << rho << endl;
 
   if(rho >= 0.75) {
     // M agrees very well with f, so try to increase lambda
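
The gain ratio rho compares the actual nonlinear cost decrease, f_error - result.f_error, against the decrease M_error - new_M_error predicted by the linearized model, falling back to 0.5 when the model predicts no change; rho near 1 means the local model is trustworthy, and the diff shows the rho >= 0.75 branch where the trust region may grow. A hedged sketch of a textbook adaptation rule follows; the helper name AdaptDelta is mine, and GTSAM's TrustRegionAdaptationMode variants and exact growth/shrink factors are not visible in this diff, so the 0.25 threshold and the factors below are conventional Powell-style choices, not necessarily GTSAM's.

#include <algorithm>

// Sketch only: classic trust region update driven by the gain ratio rho;
// constants are conventional assumptions, not GTSAM's own.
double AdaptDelta(double Delta, double rho, double step_norm) {
  if(rho >= 0.75)
    return std::max(Delta, 3.0 * step_norm);  // model reliable: allow growth
  if(rho < 0.25)
    return 0.5 * Delta;                       // model poor: shrink the region
  return Delta;                               // otherwise keep Delta as-is
}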

Makefile.am

@@ -14,8 +14,7 @@ check_PROGRAMS += testGraph
 check_PROGRAMS += testInference
 check_PROGRAMS += testGaussianJunctionTree
 check_PROGRAMS += testNonlinearEquality testNonlinearFactor testNonlinearFactorGraph
-check_PROGRAMS += testNonlinearOptimizer
-# check_PROGRAMS += testDoglegOptimizer # Uses debugging prints so commented out in SVN
+check_PROGRAMS += testNonlinearOptimizer testDoglegOptimizer
 check_PROGRAMS += testSymbolicBayesNet testSymbolicFactorGraph
 check_PROGRAMS += testTupleValues
 check_PROGRAMS += testNonlinearISAM
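
Because verbose now defaults to false throughout, testDoglegOptimizer no longer floods the make check output with debugging prints, which is why it can move back into check_PROGRAMS. A hedged skeleton of such a test follows; the real testDoglegOptimizer.cpp is not part of this commit, so the test name and body are illustrative only. GTSAM's unit tests are built on CppUnitLite.

#include <CppUnitLite/TestHarness.h>

// Illustrative skeleton only: the actual testDoglegOptimizer.cpp is not shown
// in this diff. The point is that calling the Dogleg routines without the new
// verbose argument now produces no console output during make check.
TEST(DoglegOptimizerImpl, quiet_by_default) {
  // e.g. DoglegOptimizerImpl::ComputeDoglegPoint(Delta, x_u, x_n) prints
  // nothing, since verbose defaults to false.
  CHECK(true);
}

int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}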