undo template removal

release/4.3a0
Varun Agrawal 2024-10-16 09:44:47 -04:00
parent 69729d603b
commit 7dd6d977fe
2 changed files with 163 additions and 138 deletions

View File

@@ -48,24 +48,25 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
           new State(initialValues, graph.error(initialValues)))),
       params_(params) {}
 
-double NonlinearConjugateGradientOptimizer::error(const Values& state) const {
+double NonlinearConjugateGradientOptimizer::System::error(
+    const Values& state) const {
   return graph_.error(state);
 }
 
-VectorValues NonlinearConjugateGradientOptimizer::gradient(
+VectorValues NonlinearConjugateGradientOptimizer::System::gradient(
     const Values& state) const {
   return gradientInPlace(graph_, state);
 }
 
-Values NonlinearConjugateGradientOptimizer::advance(
+Values NonlinearConjugateGradientOptimizer::System::advance(
     const Values& current, const double alpha,
     const VectorValues& gradient) const {
   return current.retract(alpha * gradient);
 }
 
 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
-  const auto [newValues, dummy] = nonlinearConjugateGradient(
-      state_->values, params_, true /* single iteration */);
+  const auto [newValues, dummy] = nonlinearConjugateGradient<System, Values>(
+      System(graph_), state_->values, params_, true /* single iteration */);
   state_.reset(
       new State(newValues, graph_.error(newValues), state_->iterations + 1));
@@ -75,136 +76,12 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
 
 const Values& NonlinearConjugateGradientOptimizer::optimize() {
   // Optimize until convergence
+  System system(graph_);
   const auto [newValues, iterations] =
-      nonlinearConjugateGradient(state_->values, params_, false);
+      nonlinearConjugateGradient(system, state_->values, params_, false);
   state_.reset(
       new State(std::move(newValues), graph_.error(newValues), iterations));
   return state_->values;
 }
 
-double NonlinearConjugateGradientOptimizer::lineSearch(
-    const Values& currentValues, const VectorValues& gradient) const {
-  /* normalize it such that it becomes a unit vector */
-  const double g = gradient.norm();
-
-  // perform the golden section search algorithm to decide the optimal step
-  // size; for details refer to
-  // http://en.wikipedia.org/wiki/Golden_section_search
-  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
-               tau = 1e-5;
-  double minStep = -1.0 / g, maxStep = 0,
-         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
-
-  Values newValues = advance(currentValues, newStep, gradient);
-  double newError = error(newValues);
-
-  while (true) {
-    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
-                                 : newStep - resphi * (newStep - minStep);
-
-    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
-      return 0.5 * (minStep + maxStep);
-    }
-
-    const Values testValues = advance(currentValues, testStep, gradient);
-    const double testError = error(testValues);
-
-    // update the working range
-    if (testError >= newError) {
-      if (flag)
-        maxStep = testStep;
-      else
-        minStep = testStep;
-    } else {
-      if (flag) {
-        minStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      } else {
-        maxStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      }
-    }
-  }
-  return 0.0;
-}
-
-std::tuple<Values, int>
-NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient(
-    const Values& initial, const NonlinearOptimizerParams& params,
-    const bool singleIteration, const bool gradientDescent) const {
-  size_t iteration = 0;
-
-  // check if we're already close enough
-  double currentError = error(initial);
-  if (currentError <= params.errorTol) {
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
-      std::cout << "Exiting, as error = " << currentError << " < "
-                << params.errorTol << std::endl;
-    }
-    return {initial, iteration};
-  }
-
-  Values currentValues = initial;
-  VectorValues currentGradient = gradient(currentValues), prevGradient,
-      direction = currentGradient;
-
-  /* do one step of gradient descent */
-  Values prevValues = currentValues;
-  double prevError = currentError;
-  double alpha = lineSearch(currentValues, direction);
-  currentValues = advance(prevValues, alpha, direction);
-  currentError = error(currentValues);
-
-  // Maybe show output
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-    std::cout << "Initial error: " << currentError << std::endl;
-
-  // Iterative loop
-  do {
-    if (gradientDescent == true) {
-      direction = gradient(currentValues);
-    } else {
-      prevGradient = currentGradient;
-      currentGradient = gradient(currentValues);
-
-      // Polak-Ribiere: beta = g_n'*(g_n - g_{n-1}) / (g_{n-1}'*g_{n-1})
-      const double beta =
-          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
-                            prevGradient.dot(prevGradient));
-      direction = currentGradient + (beta * direction);
-    }
-
-    alpha = lineSearch(currentValues, direction);
-
-    prevValues = currentValues;
-    prevError = currentError;
-
-    currentValues = advance(prevValues, alpha, direction);
-    currentError = error(currentValues);
-
-    // User hook:
-    if (params.iterationHook)
-      params.iterationHook(iteration, prevError, currentError);
-
-    // Maybe show output
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "iteration: " << iteration
-                << ", currentError: " << currentError << std::endl;
-  } while (++iteration < params.maxIterations && !singleIteration &&
-           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-                             params.errorTol, prevError, currentError,
-                             params.verbosity));
-
-  // Printing if verbose
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
-      iteration >= params.maxIterations)
-    std::cout << "nonlinearConjugateGradient: Terminating because reached "
-                 "maximum iterations"
-              << std::endl;
-
-  return {currentValues, iteration};
-}
-
 } /* namespace gtsam */
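
For context, here is a minimal sketch of how this optimizer is typically driven; the commit above changes only its internals, not this calling pattern. The toy factor graph (a single Pose2 prior), the key, and the noise sigma are illustrative assumptions, not part of the diff:

#include <gtsam/geometry/Pose2.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/nonlinear/Values.h>

int main() {
  // Toy problem: a single prior factor anchoring a Pose2 near (1, 2, 0.3).
  gtsam::NonlinearFactorGraph graph;
  auto noise = gtsam::noiseModel::Isotropic::Sigma(3, 1.0);
  graph.addPrior(1, gtsam::Pose2(1.0, 2.0, 0.3), noise);

  gtsam::Values initial;
  initial.insert(1, gtsam::Pose2(0.0, 0.0, 0.0));

  // optimize() runs nonlinear CG to convergence; iterate() takes one step.
  gtsam::NonlinearConjugateGradientOptimizer optimizer(graph, initial);
  gtsam::Values result = optimizer.optimize();
  result.print("result");
  return 0;
}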

View File

@@ -26,6 +26,22 @@ namespace gtsam {
 /** An implementation of the nonlinear CG method using the template below */
 class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
     : public NonlinearOptimizer {
+  /* a class for the nonlinearConjugateGradient template */
+  class System {
+   public:
+    typedef NonlinearOptimizerParams Parameters;
+
+   protected:
+    const NonlinearFactorGraph &graph_;
+
+   public:
+    System(const NonlinearFactorGraph &graph) : graph_(graph) {}
+    double error(const Values &state) const;
+    VectorValues gradient(const Values &state) const;
+    Values advance(const Values &current, const double alpha,
+                   const VectorValues &g) const;
+  };
+
  public:
   typedef NonlinearOptimizer Base;
   typedef NonlinearOptimizerParams Parameters;
@@ -45,13 +61,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
   /// Destructor
   ~NonlinearConjugateGradientOptimizer() override {}
 
-  double error(const Values &state) const;
-
-  VectorValues gradient(const Values &state) const;
-
-  Values advance(const Values &current, const double alpha,
-                 const VectorValues &g) const;
-
   /**
    * Perform a single iteration, returning GaussianFactorGraph corresponding to
    * the linearized factor graph.
@@ -81,4 +90,143 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
       const bool singleIteration, const bool gradientDescent = false) const;
 };
 
+/** Implement the golden-section line search algorithm */
+template <class S, class V, class W>
+double lineSearch(const S &system, const V currentValues, const W &gradient) {
+  /* normalize it such that it becomes a unit vector */
+  const double g = gradient.norm();
+
+  // perform the golden section search algorithm to decide the optimal step
+  // size; for details refer to
+  // http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+               tau = 1e-5;
+  double minStep = -1.0 / g, maxStep = 0,
+         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
+
+  V newValues = system.advance(currentValues, newStep, gradient);
+  double newError = system.error(newValues);
+
+  while (true) {
+    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
+    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                 : newStep - resphi * (newStep - minStep);
+
+    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
+      return 0.5 * (minStep + maxStep);
+    }
+
+    const V testValues = system.advance(currentValues, testStep, gradient);
+    const double testError = system.error(testValues);
+
+    // update the working range
+    if (testError >= newError) {
+      if (flag)
+        maxStep = testStep;
+      else
+        minStep = testStep;
+    } else {
+      if (flag) {
+        minStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      } else {
+        maxStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      }
+    }
+  }
+  return 0.0;
+}
+
+/**
+ * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
+ * formula suggested in
+ * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
+ *
+ * The S (system) class requires three member functions: error(state),
+ * gradient(state) and advance(state, step-size, direction). The V class
+ * denotes the state or the solution.
+ *
+ * The last parameter is a switch between gradient-descent and conjugate
+ * gradient.
+ */
+template <class S, class V>
+std::tuple<V, int> nonlinearConjugateGradient(
+    const S &system, const V &initial, const NonlinearOptimizerParams &params,
+    const bool singleIteration, const bool gradientDescent = false) {
+  // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
+
+  size_t iteration = 0;
+
+  // check if we're already close enough
+  double currentError = system.error(initial);
+  if (currentError <= params.errorTol) {
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
+      std::cout << "Exiting, as error = " << currentError << " < "
+                << params.errorTol << std::endl;
+    }
+    return {initial, iteration};
+  }
+
+  V currentValues = initial;
+  VectorValues currentGradient = system.gradient(currentValues), prevGradient,
+      direction = currentGradient;
+
+  /* do one step of gradient descent */
+  V prevValues = currentValues;
+  double prevError = currentError;
+  double alpha = lineSearch(system, currentValues, direction);
+  currentValues = system.advance(prevValues, alpha, direction);
+  currentError = system.error(currentValues);
+
+  // Maybe show output
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+    std::cout << "Initial error: " << currentError << std::endl;
+
+  // Iterative loop
+  do {
+    if (gradientDescent == true) {
+      direction = system.gradient(currentValues);
+    } else {
+      prevGradient = currentGradient;
+      currentGradient = system.gradient(currentValues);
+
+      // Polak-Ribiere: beta = g_n'*(g_n - g_{n-1}) / (g_{n-1}'*g_{n-1})
+      const double beta =
+          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                            prevGradient.dot(prevGradient));
+      direction = currentGradient + (beta * direction);
+    }
+
+    alpha = lineSearch(system, currentValues, direction);
+
+    prevValues = currentValues;
+    prevError = currentError;
+
+    currentValues = system.advance(prevValues, alpha, direction);
+    currentError = system.error(currentValues);
+
+    // User hook:
+    if (params.iterationHook)
+      params.iterationHook(iteration, prevError, currentError);
+
+    // Maybe show output
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      std::cout << "iteration: " << iteration
+                << ", currentError: " << currentError << std::endl;
+  } while (++iteration < params.maxIterations && !singleIteration &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                             params.errorTol, prevError, currentError,
+                             params.verbosity));
+
+  // Printing if verbose
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      iteration >= params.maxIterations)
+    std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                 "maximum iterations"
+              << std::endl;
+
+  return {currentValues, iteration};
+}
+
 }  // namespace gtsam
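
The restored templates define a small generic interface: any system type S that provides error(state), gradient(state) returning a VectorValues, and advance(state, step-size, direction) can be optimized over a state type V, with the search direction updated by the Polak-Ribiere rule beta = max(0, g_n'(g_n - g_{n-1}) / g_{n-1}'g_{n-1}). As a sketch of that contract, here is a hypothetical toy system minimizing f(x) = 0.5 * ||x - target||^2 directly over VectorValues; the names QuadraticSystem and target_ are invented for illustration and are not part of GTSAM:

#include <gtsam/linear/VectorValues.h>
#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

// Toy system: quadratic error about a target; the gradient is (state - target).
class QuadraticSystem {
  gtsam::VectorValues target_;

 public:
  typedef gtsam::NonlinearOptimizerParams Parameters;

  explicit QuadraticSystem(const gtsam::VectorValues &target)
      : target_(target) {}

  double error(const gtsam::VectorValues &state) const {
    const gtsam::VectorValues d = state - target_;
    return 0.5 * d.dot(d);
  }

  gtsam::VectorValues gradient(const gtsam::VectorValues &state) const {
    return state - target_;
  }

  // Move the estimate along direction g; for a vector space this is plain
  // addition, mirroring Values::retract in the member System above.
  gtsam::VectorValues advance(const gtsam::VectorValues &current,
                              const double alpha,
                              const gtsam::VectorValues &g) const {
    return current + (alpha * g);
  }
};

int main() {
  gtsam::VectorValues target, initial;
  target.insert(0, gtsam::Vector3(1.0, 2.0, 3.0));
  initial.insert(0, gtsam::Vector3(0.0, 0.0, 0.0));

  gtsam::NonlinearOptimizerParams params;
  const auto [solution, iterations] =
      gtsam::nonlinearConjugateGradient<QuadraticSystem, gtsam::VectorValues>(
          QuadraticSystem(target), initial, params,
          false /* run to convergence */);
  solution.print("solution");
  return 0;
}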