Timing statements and avoiding recalculating dimensions

parent  b1d4552781
commit  5862943a8a
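The commit brackets each stage of the Dogleg update with tic(id, "label") / toc(id, "label") pairs so every stage shows up separately in the timing output. As a rough sketch of that bracketing pattern only (GTSAM's real timing facility aggregates nested timers; this is not its implementation), a minimal timer with the same call shape could be:

#include <chrono>
#include <iostream>
#include <map>
#include <string>
#include <thread>

// Illustration only: a tiny tic/toc with the same call shape as the
// tic(id, "label") / toc(id, "label") pairs added in this commit.
static std::map<std::string, std::chrono::steady_clock::time_point> starts;

void tic(int /*id*/, const std::string& label) {
  starts[label] = std::chrono::steady_clock::now();
}

void toc(int /*id*/, const std::string& label) {
  const auto elapsed = std::chrono::steady_clock::now() - starts[label];
  std::cout << label << ": "
            << std::chrono::duration<double, std::milli>(elapsed).count()
            << " ms\n";
}

int main() {
  tic(1, "optimizeInPlace");                                   // bracket a stage...
  std::this_thread::sleep_for(std::chrono::milliseconds(5));   // ...do the work...
  toc(1, "optimizeInPlace");                                   // ...and report its cost
  return 0;
}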
@@ -143,9 +143,10 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
   optimizeGradientSearchInPlace(Rd, dx_u);
   toc(1, "optimizeGradientSearchInPlace");
   toc(0, "optimizeGradientSearch");
-  tic(1, "optimize");
-  VectorValues dx_n = optimize(Rd);
-  toc(1, "optimize");
+  tic(1, "optimizeInPlace");
+  VectorValues dx_n(VectorValues::SameStructure(dx_u));
+  optimizeInPlace(Rd, dx_n);
+  toc(1, "optimizeInPlace");
   tic(2, "jfg error");
   const GaussianFactorGraph jfg(Rd);
   const double M_error = jfg.error(VectorValues::Zero(dx_u));
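This hunk is the "avoiding recalculating dimensions" half of the commit title: rather than letting optimize(Rd) construct a fresh VectorValues and re-derive every variable's dimension, dx_n is preallocated with VectorValues::SameStructure(dx_u) and then filled by optimizeInPlace. A minimal sketch of the same preallocate-and-fill idea, using hypothetical stand-in types rather than GTSAM's VectorValues:

#include <vector>

// Hypothetical stand-ins for illustration; these are not GTSAM types.
struct Result {
  std::vector<double> values;

  // Allocate a new Result whose storage mirrors another one, so callers can
  // reuse known sizes instead of re-deriving them on every solve.
  static Result SameStructure(const Result& other) {
    Result r;
    r.values.resize(other.values.size());
    return r;
  }
};

// Fills a preallocated result; no allocation or dimension bookkeeping here.
void solveInPlace(Result& out) {
  for (double& v : out.values) v = 0.0;  // placeholder "solve"
}

int main() {
  Result dx_u;
  dx_u.values.assign(6, 1.0);                // pretend this came from an earlier step
  Result dx_n(Result::SameStructure(dx_u));  // allocate once, matching dx_u
  solveInPlace(dx_n);                        // write into the existing storage
  return 0;
}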
@@ -182,6 +183,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
     if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
     if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
 
+    tic(7, "adjust Delta");
     // Compute gain ratio. Here we take advantage of the invariant that the
     // Bayes' net error at zero is equal to the nonlinear error
     const double rho = fabs(M_error - new_M_error) < 1e-15 ?
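The new tic(7, "adjust Delta") opens just before the gain-ratio computation, whose ternary is truncated in this view. The quantity being guarded is the standard trust-region gain ratio, actual cost reduction divided by the reduction predicted by the linearized model, with a fallback when the predicted reduction is numerically zero. A plausible completion (the fallback value 0.5 is an assumption, not taken from this hunk):

#include <cmath>

// Sketch of the standard trust-region gain ratio with a guard against a
// near-zero predicted reduction, the case the 1e-15 test above protects.
double gainRatio(double f_error, double new_f_error,
                 double M_error, double new_M_error) {
  const double predicted = M_error - new_M_error;  // reduction predicted by the model
  if (std::fabs(predicted) < 1e-15)
    return 0.5;                                    // assumed fallback: "neither good nor bad"
  return (f_error - new_f_error) / predicted;      // actual / predicted reduction
}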
@@ -242,6 +244,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
         stay = false;
       }
     }
+    toc(7, "adjust Delta");
   }
 
   // dx_d and f_error have already been filled in during the loop
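The matching toc(7, "adjust Delta") closes the block that grows or shrinks the trust-region radius from rho; that logic itself lies outside the hunk. For reference, a textbook dogleg adaptation rule of the kind being timed here (an illustration only, not necessarily what doglegParams.adaptationMode selects):

#include <algorithm>

// Textbook trust-region adjustment (illustrative assumption, not GTSAM's code):
// expand Delta after a very successful step, shrink it after a poor one.
double adjustDelta(double Delta, double rho, double stepNorm) {
  if (rho > 0.75)
    Delta = std::max(Delta, 3.0 * stepNorm);  // model matched well: allow longer steps
  else if (rho < 0.25)
    Delta *= 0.5;                             // model was poor: be more conservative
  return Delta;
}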
@@ -574,14 +574,16 @@ void ISAM2::updateDelta(bool forceFullSolve) const {
 
     // Do one Dogleg iteration
     tic(1, "Dogleg Iterate");
-    DoglegOptimizerImpl::IterationResult doglegResult = DoglegOptimizerImpl::Iterate(
-        *doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose);
+    DoglegOptimizerImpl::IterationResult doglegResult(DoglegOptimizerImpl::Iterate(
+        *doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose));
     toc(1, "Dogleg Iterate");
 
+    tic(2, "Copy dx_d");
     // Update Delta and linear step
     doglegDelta_ = doglegResult.Delta;
     delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta so there is no permutation
     delta_.container() = doglegResult.dx_d; // Copy the VectorValues containing with the linear solution
+    toc(2, "Copy dx_d");
   }
 
   deltaUptodate_ = true;
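In ISAM2::updateDelta, delta_ behaves as a permuted view: lookups go through permutation() and then into container(). Because Dogleg solves for the full delta, the commit resets the permutation to identity and copies doglegResult.dx_d straight into the container, now timed by the new tic(2)/toc(2) pair. A toy version of such a permuted view (hypothetical type, not GTSAM's actual Permuted wrapper):

#include <cstddef>
#include <numeric>
#include <vector>

// Toy permuted view for illustration only.
struct PermutedVector {
  std::vector<double> container;         // the underlying values
  std::vector<std::size_t> permutation;  // index mapping applied on access

  double operator[](std::size_t i) const { return container[permutation[i]]; }

  void resetToIdentity() {
    permutation.resize(container.size());
    std::iota(permutation.begin(), permutation.end(), std::size_t{0});
  }
};

int main() {
  PermutedVector delta;
  std::vector<double> dx_d = {0.1, -0.2, 0.3};  // pretend Dogleg result
  delta.container = dx_d;    // copy the full solution
  delta.resetToIdentity();   // Dogleg solves for the full delta: no permutation needed
  return 0;
}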
@@ -677,17 +679,17 @@ void optimizeGradientSearchInPlace(const ISAM2& isam, VectorValues& grad) {
     toc(1, "UpdateDoglegDeltas");
   }
 
-  tic(1, "Compute Gradient");
+  tic(2, "Compute Gradient");
   // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
   gradientAtZero(isam, grad);
   double gradientSqNorm = grad.dot(grad);
-  toc(1, "Compute Gradient");
+  toc(2, "Compute Gradient");
 
-  tic(2, "Compute minimizing step size");
+  tic(3, "Compute minimizing step size");
   // Compute minimizing step size
   double RgNormSq = isam.RgProd_.container().vector().squaredNorm();
   double step = -gradientSqNorm / RgNormSq;
-  toc(2, "Compute minimizing step size");
+  toc(3, "Compute minimizing step size");
 
   tic(4, "Compute point");
   // Compute steepest descent point
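The renumbered timers separate the two ingredients of the steepest-descent (Cauchy) point: the gradient g of the quadratic model at zero, and the minimizing step length along it. For M(delta) = 0.5*||R*delta - d||^2, the minimizer along g sits at step = -||g||^2 / ||R g||^2, which is exactly what step = -gradientSqNorm / RgNormSq evaluates; the point itself, computed in the "Compute point" block that is cut off here, is presumably step * g. A self-contained numerical check of that formula using Eigen (illustrative only, not the ISAM2 code path):

#include <Eigen/Dense>
#include <iostream>

// Steepest-descent (Cauchy) point for M(delta) = 0.5 * ||R*delta - d||^2,
// mirroring the step-size formula timed in the hunk above.
int main() {
  Eigen::MatrixXd R(3, 3);
  R << 2, 1, 0,
       0, 3, 1,
       0, 0, 1;
  Eigen::VectorXd d(3);
  d << 1, 2, 3;

  Eigen::VectorXd g = -R.transpose() * d;     // gradient of M at delta = 0
  double gradientSqNorm = g.dot(g);
  double RgNormSq = (R * g).squaredNorm();
  double step = -gradientSqNorm / RgNormSq;   // minimizing step length along g
  Eigen::VectorXd dx_u = step * g;            // steepest-descent point

  std::cout << "dx_u = " << dx_u.transpose() << std::endl;
  return 0;
}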