Timing statements and avoiding recalculating dimensions

release/4.3a0
Richard Roberts 2012-03-22 06:18:38 +00:00
parent b1d4552781
commit 5862943a8a
2 changed files with 14 additions and 9 deletions
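The change in the first file replaces the allocating optimize(Rd) call with an in-place solve into a pre-sized VectorValues, which is where the "avoiding recalculating dimensions" part of the title comes from; the rest of the commit adds or renumbers tic/toc timing brackets. A minimal sketch of the new pattern, using only the tic/toc(id, label) helpers and the VectorValues::SameStructure factory that appear in the diff below:

    tic(1, "optimizeInPlace");
    // Allocate dx_n with the same variable layout as dx_u so the solver does
    // not have to recompute dimensions, then write the solution into it.
    VectorValues dx_n(VectorValues::SameStructure(dx_u));
    optimizeInPlace(Rd, dx_n);
    toc(1, "optimizeInPlace");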


@@ -143,9 +143,10 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
optimizeGradientSearchInPlace(Rd, dx_u);
toc(1, "optimizeGradientSearchInPlace");
toc(0, "optimizeGradientSearch");
tic(1, "optimize");
VectorValues dx_n = optimize(Rd);
toc(1, "optimize");
tic(1, "optimizeInPlace");
VectorValues dx_n(VectorValues::SameStructure(dx_u));
optimizeInPlace(Rd, dx_n);
toc(1, "optimizeInPlace");
tic(2, "jfg error");
const GaussianFactorGraph jfg(Rd);
const double M_error = jfg.error(VectorValues::Zero(dx_u));
@@ -182,6 +183,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
tic(7, "adjust Delta");
// Compute gain ratio. Here we take advantage of the invariant that the
// Bayes' net error at zero is equal to the nonlinear error
const double rho = fabs(M_error - new_M_error) < 1e-15 ?
@@ -242,6 +244,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
stay = false;
}
}
toc(7, "adjust Delta");
}
// dx_d and f_error have already been filled in during the loop
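The new tic(7, "adjust Delta") / toc(7, "adjust Delta") pair brackets the trust-region update that follows the gain-ratio computation. The hunk ends mid-expression at the guard of the ternary, so the completion below is an assumption based on the standard Dogleg gain ratio (actual versus predicted error reduction), not the verbatim source; the identifiers are the ones printed in the verbose output just above:

    // Assumed completion of the guarded gain ratio (not shown in the hunk):
    const double rho = fabs(M_error - new_M_error) < 1e-15
        ? 0.5                                                    // model predicts essentially no change
        : (f_error - result.f_error) / (M_error - new_M_error);  // actual / predicted reduction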


@@ -574,14 +574,16 @@ void ISAM2::updateDelta(bool forceFullSolve) const {
// Do one Dogleg iteration
tic(1, "Dogleg Iterate");
DoglegOptimizerImpl::IterationResult doglegResult = DoglegOptimizerImpl::Iterate(
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose);
DoglegOptimizerImpl::IterationResult doglegResult(DoglegOptimizerImpl::Iterate(
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose));
toc(1, "Dogleg Iterate");
tic(2, "Copy dx_d");
// Update Delta and linear step
doglegDelta_ = doglegResult.Delta;
delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta, so there is no permutation
delta_.container() = doglegResult.dx_d; // Copy the VectorValues containing the linear solution
toc(2, "Copy dx_d");
}
deltaUptodate_ = true;
@@ -677,17 +679,17 @@ void optimizeGradientSearchInPlace(const ISAM2& isam, VectorValues& grad) {
toc(1, "UpdateDoglegDeltas");
}
tic(1, "Compute Gradient");
tic(2, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
gradientAtZero(isam, grad);
double gradientSqNorm = grad.dot(grad);
toc(1, "Compute Gradient");
toc(2, "Compute Gradient");
tic(2, "Compute minimizing step size");
tic(3, "Compute minimizing step size");
// Compute minimizing step size
double RgNormSq = isam.RgProd_.container().vector().squaredNorm();
double step = -gradientSqNorm / RgNormSq;
toc(2, "Compute minimizing step size");
toc(3, "Compute minimizing step size");
tic(4, "Compute point");
// Compute steepest descent point
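The last hunk only renumbers the timers in optimizeGradientSearchInPlace, giving each timed block its own id. The step it times is the exact minimizer along the gradient line: with g the gradient at zero and R*g the cached product held in RgProd_, the quadratic model is minimized at step = -|g|^2 / |R g|^2, which is exactly the ratio of the two squared norms computed in the hunk. A sketch of the sequence after this commit; the body of "Compute point" lies past the end of the hunk, so the final scaling is an assumption:

    tic(2, "Compute Gradient");
    gradientAtZero(isam, grad);              // g: gradient of the error at delta = 0
    double gradientSqNorm = grad.dot(grad);  // |g|^2
    toc(2, "Compute Gradient");
    tic(3, "Compute minimizing step size");
    double RgNormSq = isam.RgProd_.container().vector().squaredNorm();  // |R g|^2
    double step = -gradientSqNorm / RgNormSq;                           // minimizer of the quadratic model along g
    toc(3, "Compute minimizing step size");
    tic(4, "Compute point");
    // Assumed: scale grad by step to obtain the steepest-descent point; the
    // actual statement is not shown in the diff.
    toc(4, "Compute point");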