Updated Fixed-Lag smoother example

release/4.3a0
Stephen Williams 2013-05-21 14:57:40 +00:00
parent 6b47aaeba3
commit 13d3f1cb6e
2 changed files with 12 additions and 9 deletions


@@ -65,7 +65,10 @@ int main(int argc, char** argv) {
   // The Batch version uses Levenberg-Marquardt to perform the nonlinear optimization
   BatchFixedLagSmoother smootherBatch(lag);
   // The Incremental version uses iSAM2 to perform the nonlinear optimization
-  IncrementalFixedLagSmoother smootherISAM2(lag);
+  ISAM2Params parameters;
+  parameters.relinearizeThreshold = 0.0; // Set the relin threshold to zero such that the batch estimate is recovered
+  parameters.relinearizeSkip = 1; // Relinearize every time
+  IncrementalFixedLagSmoother smootherISAM2(lag, parameters);

   // Create containers to store the factors and linearization points that
   // will be sent to the smoothers
@@ -111,6 +114,9 @@ int main(int argc, char** argv) {
     // Update the smoothers with the new factors
     smootherBatch.update(newFactors, newValues, newTimestamps);
     smootherISAM2.update(newFactors, newValues, newTimestamps);
+    for(size_t i = 1; i < 2; ++i) { // Optionally perform multiple iSAM2 iterations
+      smootherISAM2.update();
+    }

     // Print the optimized current pose
     cout << setprecision(5) << "Timestamp = " << time << endl;
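
For readers trying out the updated example, the following is a minimal, self-contained sketch of how the two smoothers might be constructed and driven with the iSAM2 parameters introduced above. It assumes GTSAM with the gtsam_unstable fixed-lag smoothers built; the keys, noise sigmas, and prior/odometry values are made up for illustration and are not the ones used in the actual example.

#include <iostream>

#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Key.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam_unstable/nonlinear/BatchFixedLagSmoother.h>
#include <gtsam_unstable/nonlinear/IncrementalFixedLagSmoother.h>

using namespace gtsam;

int main() {
  const double lag = 2.0;  // smoother window, in seconds

  // Batch smoother (Levenberg-Marquardt) with default parameters
  BatchFixedLagSmoother smootherBatch(lag);

  // Incremental smoother (iSAM2), relinearizing everything at every update
  ISAM2Params parameters;
  parameters.relinearizeThreshold = 0.0;
  parameters.relinearizeSkip = 1;
  IncrementalFixedLagSmoother smootherISAM2(lag, parameters);

  // New factors, values, and key->timestamp entries accumulated per step
  NonlinearFactorGraph newFactors;
  Values newValues;
  FixedLagSmoother::KeyTimestampMap newTimestamps;

  // Prior on the first pose at time 0.0 (illustration values)
  auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.05));
  newFactors.push_back(PriorFactor<Pose2>(Key(0), Pose2(0.0, 0.0, 0.0), priorNoise));
  newValues.insert(Key(0), Pose2(0.0, 0.0, 0.0));
  newTimestamps[Key(0)] = 0.0;

  auto odomNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
  for (size_t k = 1; k <= 5; ++k) {
    const double time = 0.5 * k;

    // One odometry factor per step, plus a rough initial guess for the new pose
    newFactors.push_back(BetweenFactor<Pose2>(Key(k - 1), Key(k), Pose2(0.5, 0.0, 0.0), odomNoise));
    newValues.insert(Key(k), Pose2(0.5 * k, 0.0, 0.0));
    newTimestamps[Key(k)] = time;

    // Feed the same data to both smoothers
    smootherBatch.update(newFactors, newValues, newTimestamps);
    smootherISAM2.update(newFactors, newValues, newTimestamps);
    smootherISAM2.update();  // optional extra iSAM2 iteration, as in the example

    std::cout << "t = " << time
              << "  batch x = " << smootherBatch.calculateEstimate<Pose2>(Key(k)).x()
              << "  isam2 x = " << smootherISAM2.calculateEstimate<Pose2>(Key(k)).x() << std::endl;

    // Clear the containers for the next step
    newFactors.resize(0);
    newValues.clear();
    newTimestamps.clear();
  }

  return 0;
}

Setting relinearizeThreshold to zero and relinearizeSkip to 1 forces iSAM2 to relinearize every variable at every update, which is what lets the incremental estimate track the batch Levenberg-Marquardt result closely.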


@@ -233,7 +233,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
   // Create a Values that holds the current evaluation point
   Values evalpoint = theta_.retract(delta_, ordering_);
   result.error = factors_.error(evalpoint);
-std::cout << "Initial Error = " << result.error << std::endl;
   // Use a custom optimization loop so the linearization points can be controlled
   double previousError;
   VectorValues newDelta;
@@ -266,22 +266,20 @@ std::cout << "Initial Error = " << result.error << std::endl;
      }
      gttoc(damp);
      result.intermediateSteps++;
-std::cout << "Trying Lambda = " << lambda << std::endl;
      gttic(solve);
      // Solve Damped Gaussian Factor Graph
      newDelta = GaussianJunctionTree(dampedFactorGraph).optimize(parameters_.getEliminationFunction());
      // update the evalpoint with the new delta
      evalpoint = theta_.retract(newDelta, ordering_);
      gttoc(solve);
-std::cout << " Max Delta = " << newDelta.asVector().maxCoeff() << std::endl;
      // Evaluate the new error
      gttic(compute_error);
      double error = factors_.error(evalpoint);
      gttoc(compute_error);
-std::cout << " New Error = " << error << std::endl;
-std::cout << " Change = " << result.error - error << std::endl;
      if(error < result.error) {
-std::cout << " Keeping Change" << std::endl;
        // Keep this change
        // Update the error value
        result.error = error;
@@ -305,7 +303,6 @@ std::cout << " Keeping Change" << std::endl;
        // End this lambda search iteration
        break;
      } else {
-std::cout << " Rejecting Change" << std::endl;
        // Reject this change
        // Increase lambda and continue searching
        lambda *= lambdaFactor;
@@ -322,7 +319,7 @@ std::cout << " Rejecting Change" << std::endl;
    result.iterations++;
  } while(result.iterations < maxIterations &&
      !checkConvergence(relativeErrorTol, absoluteErrorTol, errorTol, previousError, result.error, NonlinearOptimizerParams::SILENT));
-std::cout << "Final Error = " << result.error << std::endl;

  return result;
}
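
The removed std::cout statements were instrumenting the custom Levenberg-Marquardt loop in BatchFixedLagSmoother::optimize() (initial error, the lambda being tried, the resulting step and error, and whether the change was kept or rejected). As a rough illustration of the accept/reject lambda search that loop performs, here is a toy, self-contained sketch on a 1-D problem; it is generic illustration code, not the GTSAM implementation, and the tolerances and damping constants are arbitrary.

#include <cmath>
#include <iostream>

int main() {
  double x = 0.0;                   // current linearization point
  double lambda = 1e-5;             // Levenberg-Marquardt damping factor
  const double lambdaFactor = 10.0; // how aggressively lambda is scaled
  const double lambdaUpperBound = 1e5;
  const int maxIterations = 100;
  const double relativeErrorTol = 1e-8;

  auto residual = [](double v) { return std::exp(v) - 2.0; };
  auto error = [&](double v) { const double r = residual(v); return 0.5 * r * r; };

  double currentError = error(x);
  for (int iter = 0; iter < maxIterations; ++iter) {
    const double previousError = currentError;

    // Lambda search: damp the (here 1-D) linear system until a step reduces the error
    while (lambda < lambdaUpperBound) {
      const double r = residual(x);
      const double J = std::exp(x);                    // Jacobian of the residual
      const double dx = -(J * r) / (J * J + lambda);   // damped Gauss-Newton step
      const double newError = error(x + dx);

      if (newError < currentError) {
        x += dx;                     // keep the change and relax the damping
        currentError = newError;
        lambda /= lambdaFactor;
        break;
      }
      lambda *= lambdaFactor;        // reject the change, increase damping, retry
    }

    // Stop when the error no longer improves appreciably
    if (std::fabs(previousError - currentError) < relativeErrorTol * previousError)
      break;
  }

  std::cout << "x = " << x << ", error = " << currentError << std::endl;
  return 0;
}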