Merge branch 'conjugate-gradient' into cg-methods

release/4.3a0
Varun Agrawal 2024-10-16 10:26:31 -04:00
commit efce38c14c
14 changed files with 286 additions and 296 deletions

View File

@@ -94,11 +94,10 @@ int main(int argc, char* argv[]) {
   parameters.maxIterations = 500;
   PCGSolverParameters::shared_ptr pcg =
       std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner = std::make_shared<BlockJacobiPreconditionerParameters>();
   // Following is crucial:
-  pcg->setEpsilon_abs(1e-10);
-  pcg->setEpsilon_rel(1e-10);
+  pcg->epsilon_abs = 1e-10;
+  pcg->epsilon_rel = 1e-10;

   parameters.iterativeParams = pcg;
   LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);

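The hunk above shows the pattern this commit applies throughout: PCG options become public data members instead of setter calls. A minimal sketch of the updated call site, assuming the graph and initialEstimate objects from the surrounding example:

    // Sketch only; `graph` and `initialEstimate` come from the example above.
    LevenbergMarquardtParams parameters;
    parameters.linearSolverType = LevenbergMarquardtParams::Iterative;
    parameters.maxIterations = 500;

    auto pcg = std::make_shared<PCGSolverParameters>();
    pcg->preconditioner = std::make_shared<BlockJacobiPreconditionerParameters>();
    // Formerly pcg->setEpsilon_abs(1e-10) / pcg->setEpsilon_rel(1e-10):
    pcg->epsilon_abs = 1e-10;
    pcg->epsilon_rel = 1e-10;

    parameters.iterativeParams = pcg;
    LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);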
View File

@@ -26,13 +26,13 @@ namespace gtsam {

 /*****************************************************************************/
 void ConjugateGradientParameters::print(ostream &os) const {
-  Base::print(os);
-  cout << "ConjugateGradientParameters" << endl
-       << "minIter: " << minIterations_ << endl
-       << "maxIter: " << maxIterations_ << endl
-       << "resetIter: " << reset_ << endl
-       << "eps_rel: " << epsilon_rel_ << endl
-       << "eps_abs: " << epsilon_abs_ << endl;
+  Base::print(os);
+  cout << "ConjugateGradientParameters" << endl
+       << "minIter: " << minIterations << endl
+       << "maxIter: " << maxIterations << endl
+       << "resetIter: " << reset << endl
+       << "eps_rel: " << epsilon_rel << endl
+       << "eps_abs: " << epsilon_abs << endl;
 }

 /*****************************************************************************/

View File

@@ -26,59 +26,64 @@ namespace gtsam {

 /**
  * Parameters for the Conjugate Gradient method
  */
-class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
-public:
+struct GTSAM_EXPORT ConjugateGradientParameters
+    : public IterativeOptimizationParameters {
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;

-protected:
-  size_t minIterations_;  ///< minimum number of cg iterations
-  size_t maxIterations_;  ///< maximum number of cg iterations
-  size_t reset_;          ///< number of iterations before reset
-  double epsilon_rel_;    ///< threshold for relative error decrease
-  double epsilon_abs_;    ///< threshold for absolute error decrease
+  size_t minIterations;  ///< minimum number of cg iterations
+  size_t maxIterations;  ///< maximum number of cg iterations
+  size_t reset;          ///< number of iterations before reset
+  double epsilon_rel;    ///< threshold for relative error decrease
+  double epsilon_abs;    ///< threshold for absolute error decrease

-public:
   /* Matrix Operation Kernel */
   enum BLASKernel {
-    GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
-  } blas_kernel_ ;
+    GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
+  } blas_kernel;

   ConjugateGradientParameters()
-  : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3),
-    epsilon_abs_(1e-3), blas_kernel_(GTSAM) {}
+      : minIterations(1),
+        maxIterations(500),
+        reset(501),
+        epsilon_rel(1e-3),
+        epsilon_abs(1e-3),
+        blas_kernel(GTSAM) {}

-  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-      double epsilon_rel, double epsilon_abs, BLASKernel blas)
-    : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-      epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}
+  ConjugateGradientParameters(size_t minIterations, size_t maxIterations,
+                              size_t reset, double epsilon_rel,
+                              double epsilon_abs, BLASKernel blas)
+      : minIterations(minIterations),
+        maxIterations(maxIterations),
+        reset(reset),
+        epsilon_rel(epsilon_rel),
+        epsilon_abs(epsilon_abs),
+        blas_kernel(blas) {}

   ConjugateGradientParameters(const ConjugateGradientParameters &p)
-    : Base(p), minIterations_(p.minIterations_), maxIterations_(p.maxIterations_), reset_(p.reset_),
-      epsilon_rel_(p.epsilon_rel_), epsilon_abs_(p.epsilon_abs_), blas_kernel_(GTSAM) {}
+      : Base(p),
+        minIterations(p.minIterations),
+        maxIterations(p.maxIterations),
+        reset(p.reset),
+        epsilon_rel(p.epsilon_rel),
+        epsilon_abs(p.epsilon_abs),
+        blas_kernel(GTSAM) {}

-  /* general interface */
-  inline size_t minIterations() const { return minIterations_; }
-  inline size_t maxIterations() const { return maxIterations_; }
-  inline size_t reset() const { return reset_; }
-  inline double epsilon() const { return epsilon_rel_; }
-  inline double epsilon_rel() const { return epsilon_rel_; }
-  inline double epsilon_abs() const { return epsilon_abs_; }
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43
+  inline size_t getMinIterations() const { return minIterations; }
+  inline size_t getMaxIterations() const { return maxIterations; }
+  inline size_t getReset() const { return reset; }
+  inline double getEpsilon() const { return epsilon_rel; }
+  inline double getEpsilon_rel() const { return epsilon_rel; }
+  inline double getEpsilon_abs() const { return epsilon_abs; }

-  inline size_t getMinIterations() const { return minIterations_; }
-  inline size_t getMaxIterations() const { return maxIterations_; }
-  inline size_t getReset() const { return reset_; }
-  inline double getEpsilon() const { return epsilon_rel_; }
-  inline double getEpsilon_rel() const { return epsilon_rel_; }
-  inline double getEpsilon_abs() const { return epsilon_abs_; }
-
-  inline void setMinIterations(size_t value) { minIterations_ = value; }
-  inline void setMaxIterations(size_t value) { maxIterations_ = value; }
-  inline void setReset(size_t value) { reset_ = value; }
-  inline void setEpsilon(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_rel(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_abs(double value) { epsilon_abs_ = value; }
+  inline void setMinIterations(size_t value) { minIterations = value; }
+  inline void setMaxIterations(size_t value) { maxIterations = value; }
+  inline void setReset(size_t value) { reset = value; }
+  inline void setEpsilon(double value) { epsilon_rel = value; }
+  inline void setEpsilon_rel(double value) { epsilon_rel = value; }
+  inline void setEpsilon_abs(double value) { epsilon_abs = value; }
+#endif

   void print() const { Base::print(); }
@@ -111,18 +116,19 @@ V preconditionedConjugateGradient(const S &system, const V &initial,

   double currentGamma = system.dot(residual, residual), prevGamma, alpha, beta;

-  const size_t iMaxIterations = parameters.maxIterations(),
-               iMinIterations = parameters.minIterations(),
-               iReset = parameters.reset() ;
-  const double threshold = std::max(parameters.epsilon_abs(),
-      parameters.epsilon() * parameters.epsilon() * currentGamma);
+  const size_t iMaxIterations = parameters.maxIterations,
+               iMinIterations = parameters.minIterations,
+               iReset = parameters.reset;
+  const double threshold =
+      std::max(parameters.epsilon_abs,
+               parameters.epsilon_rel * parameters.epsilon_rel * currentGamma);

-  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY )
-    std::cout << "[PCG] epsilon = " << parameters.epsilon()
-              << ", max = " << parameters.maxIterations()
-              << ", reset = " << parameters.reset()
-              << ", ||r0||^2 = " << currentGamma
-              << ", threshold = " << threshold << std::endl;
+  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY)
+    std::cout << "[PCG] epsilon = " << parameters.epsilon_rel
+              << ", max = " << parameters.maxIterations
+              << ", reset = " << parameters.reset
+              << ", ||r0||^2 = " << currentGamma
+              << ", threshold = " << threshold << std::endl;

   size_t k;
   for ( k = 1 ; k <= iMaxIterations && (currentGamma > threshold || k <= iMinIterations) ; k++ ) {

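For reference, the stopping rule in preconditionedConjugateGradient now reads directly off the public fields: iteration stops once the squared residual norm falls below max(epsilon_abs, epsilon_rel^2 * ||r0||^2). A small sketch with illustrative numbers (not taken from the source):

    #include <algorithm>
    #include <gtsam/linear/ConjugateGradientSolver.h>

    // With the defaults epsilon_rel = epsilon_abs = 1e-3 and a hypothetical
    // initial squared residual ||r0||^2 = 4.0, the loop exits once
    // ||r_k||^2 <= max(1e-3, (1e-3)^2 * 4.0) = 1e-3.
    gtsam::ConjugateGradientParameters p;
    const double gamma0 = 4.0;  // hypothetical ||r0||^2
    const double threshold =
        std::max(p.epsilon_abs, p.epsilon_rel * p.epsilon_rel * gamma0);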
View File

@@ -34,17 +34,13 @@ namespace gtsam {
 void PCGSolverParameters::print(ostream &os) const {
   Base::print(os);
   os << "PCGSolverParameters:" << endl;
-  preconditioner_->print(os);
+  preconditioner->print(os);
 }

 /*****************************************************************************/
 PCGSolver::PCGSolver(const PCGSolverParameters &p) {
   parameters_ = p;
-  preconditioner_ = createPreconditioner(p.preconditioner());
-}
-
-void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner) {
-  preconditioner_ = preconditioner;
+  preconditioner_ = createPreconditioner(p.preconditioner);
 }

 void PCGSolverParameters::print(const std::string &s) const {

View File

@@ -33,30 +33,19 @@ struct PreconditionerParameters;

 /**
  * Parameters for Preconditioned Conjugate Gradient solver.
  */
-struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
-public:
+struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters {
   typedef ConjugateGradientParameters Base;
   typedef std::shared_ptr<PCGSolverParameters> shared_ptr;

-protected:
-  std::shared_ptr<PreconditionerParameters> preconditioner_;
+  std::shared_ptr<PreconditionerParameters> preconditioner;

-public:
   PCGSolverParameters() {}

   PCGSolverParameters(
       const std::shared_ptr<PreconditionerParameters> &preconditioner)
-      : preconditioner_(preconditioner) {}
+      : preconditioner(preconditioner) {}

   void print(std::ostream &os) const override;

-  const std::shared_ptr<PreconditionerParameters> preconditioner() const {
-    return preconditioner_;
-  }
-
-  void setPreconditionerParams(
-      const std::shared_ptr<PreconditionerParameters> preconditioner);
-
   void print(const std::string &s) const;
 };

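With the accessor and setPreconditionerParams removed, a preconditioner is now supplied either through the constructor or by assigning the public field. A short sketch of both routes:

    #include <gtsam/linear/PCGSolver.h>
    #include <gtsam/linear/Preconditioner.h>

    // Either pass the preconditioner parameters at construction...
    auto pcg = std::make_shared<gtsam::PCGSolverParameters>(
        std::make_shared<gtsam::BlockJacobiPreconditionerParameters>());

    // ...or assign the now-public field directly (this replaces the old
    // setPreconditionerParams call).
    pcg->preconditioner = std::make_shared<gtsam::DummyPreconditionerParameters>();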
View File

@@ -49,10 +49,12 @@ namespace gtsam {

       // init gamma and calculate threshold
       gamma = dot(g,g);
-      threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
+      threshold =
+          std::max(parameters_.epsilon_abs,
+                   parameters_.epsilon_rel * parameters_.epsilon_rel * gamma);

       // Allocate and calculate A*d for first iteration
-      if (gamma > parameters_.epsilon_abs()) Ad = Ab * d;
+      if (gamma > parameters_.epsilon_abs) Ad = Ab * d;
     }

 /* ************************************************************************* */
@@ -79,13 +81,13 @@ namespace gtsam {

     // take a step, return true if converged
     bool step(const S& Ab, V& x) {
-      if ((++k) >= ((int)parameters_.maxIterations())) return true;
+      if ((++k) >= ((int)parameters_.maxIterations)) return true;

       //---------------------------------->
       double alpha = takeOptimalStep(x);

       // update gradient (or re-calculate at reset time)
-      if (k % parameters_.reset() == 0) g = Ab.gradient(x);
+      if (k % parameters_.reset == 0) g = Ab.gradient(x);
       // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
       else Ab.transposeMultiplyAdd(alpha, Ad, g);
@@ -126,11 +128,10 @@ namespace gtsam {
   CGState<S, V, E> state(Ab, x, parameters, steepest);

   if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
-    std::cout << "CG: epsilon = " << parameters.epsilon()
-              << ", maxIterations = " << parameters.maxIterations()
+    std::cout << "CG: epsilon = " << parameters.epsilon_rel
+              << ", maxIterations = " << parameters.maxIterations
               << ", ||g0||^2 = " << state.gamma
-              << ", threshold = " << state.threshold
-              << std::endl;
+              << ", threshold = " << state.threshold << std::endl;

   if ( state.gamma < state.threshold ) {
     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)

View File

@@ -710,17 +710,11 @@ virtual class IterativeOptimizationParameters {
 #include <gtsam/linear/ConjugateGradientSolver.h>
 virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   ConjugateGradientParameters();
-  int getMinIterations() const ;
-  int getMaxIterations() const ;
-  int getReset() const;
-  double getEpsilon_rel() const;
-  double getEpsilon_abs() const;
-  void setMinIterations(int value);
-  void setMaxIterations(int value);
-  void setReset(int value);
-  void setEpsilon_rel(double value);
-  void setEpsilon_abs(double value);
+  int minIterations;
+  int maxIterations;
+  int reset;
+  double epsilon_rel;
+  double epsilon_abs;
 };

 #include <gtsam/linear/Preconditioner.h>
@@ -739,8 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParamet
 #include <gtsam/linear/PCGSolver.h>
 virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
   PCGSolverParameters();
+  PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner);
   void print(string s = "");
-  void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
+  gtsam::PreconditionerParameters* preconditioner;
 };

 #include <gtsam/linear/SubgraphSolver.h>

View File

@@ -88,24 +88,29 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
           new State(initialValues, graph.error(initialValues)))),
       params_(params) {}

-double NonlinearConjugateGradientOptimizer::error(const Values& state) const {
+double NonlinearConjugateGradientOptimizer::System::error(
+    const State& state) const {
   return graph_.error(state);
 }

-VectorValues NonlinearConjugateGradientOptimizer::gradient(
-    const Values& state) const {
+NonlinearConjugateGradientOptimizer::System::Gradient
+NonlinearConjugateGradientOptimizer::System::gradient(
+    const State& state) const {
   return gradientInPlace(graph_, state);
 }

-Values NonlinearConjugateGradientOptimizer::advance(
-    const Values& current, const double alpha,
-    const VectorValues& gradient) const {
-  return current.retract(alpha * gradient);
+NonlinearConjugateGradientOptimizer::System::State
+NonlinearConjugateGradientOptimizer::System::advance(const State& current,
+                                                     const double alpha,
+                                                     const Gradient& g) const {
+  Gradient step = g;
+  step *= alpha;
+  return current.retract(step);
 }

 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
-  const auto [newValues, dummy] = nonlinearConjugateGradient(
-      state_->values, params_, true /* single iteration */);
+  const auto [newValues, dummy] = nonlinearConjugateGradient<System, Values>(
+      System(graph_), state_->values, params_, true /* single iteration */);
   state_.reset(
       new State(newValues, graph_.error(newValues), state_->iterations + 1));
@@ -115,148 +120,12 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {

 const Values& NonlinearConjugateGradientOptimizer::optimize() {
   // Optimize until convergence
+  System system(graph_);
   const auto [newValues, iterations] =
-      nonlinearConjugateGradient(state_->values, params_, false);
+      nonlinearConjugateGradient(system, state_->values, params_, false);
   state_.reset(
       new State(std::move(newValues), graph_.error(newValues), iterations));
   return state_->values;
 }

-double NonlinearConjugateGradientOptimizer::lineSearch(
-    const Values& currentValues, const VectorValues& gradient) const {
-  /* normalize it such that it becomes a unit vector */
-  const double g = gradient.norm();
-
-  // perform the golden section search algorithm to decide the the optimal
-  // step size detail refer to
-  // http://en.wikipedia.org/wiki/Golden_section_search
-  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
-               tau = 1e-5;
-  double minStep = -1.0 / g, maxStep = 0,
-         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
-
-  Values newValues = advance(currentValues, newStep, gradient);
-  double newError = error(newValues);
-
-  while (true) {
-    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
-                                 : newStep - resphi * (newStep - minStep);
-
-    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
-      return 0.5 * (minStep + maxStep);
-    }
-
-    const Values testValues = advance(currentValues, testStep, gradient);
-    const double testError = error(testValues);
-
-    // update the working range
-    if (testError >= newError) {
-      if (flag)
-        maxStep = testStep;
-      else
-        minStep = testStep;
-    } else {
-      if (flag) {
-        minStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      } else {
-        maxStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      }
-    }
-  }
-  return 0.0;
-}
-
-std::tuple<Values, int>
-NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient(
-    const Values& initial, const NonlinearOptimizerParams& params,
-    const bool singleIteration, const bool gradientDescent) const {
-  size_t iteration = 0;
-
-  // check if we're already close enough
-  double currentError = error(initial);
-  if (currentError <= params.errorTol) {
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
-      std::cout << "Exiting, as error = " << currentError << " < "
-                << params.errorTol << std::endl;
-    }
-    return {initial, iteration};
-  }
-
-  Values currentValues = initial;
-  VectorValues currentGradient = gradient(currentValues), prevGradient,
-               direction = currentGradient;
-
-  /* do one step of gradient descent */
-  Values prevValues = currentValues;
-  double prevError = currentError;
-  double alpha = lineSearch(currentValues, direction);
-  currentValues = advance(prevValues, alpha, direction);
-  currentError = error(currentValues);
-
-  // Maybe show output
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-    std::cout << "Initial error: " << currentError << std::endl;
-
-  // Iterative loop
-  do {
-    if (gradientDescent == true) {
-      direction = gradient(currentValues);
-    } else {
-      prevGradient = currentGradient;
-      currentGradient = gradient(currentValues);
-      double beta;
-
-      switch (directionMethod_) {
-        case DirectionMethod::FletcherReeves:
-          beta = FletcherReeves(currentGradient, prevGradient);
-          break;
-        case DirectionMethod::PolakRibiere:
-          beta = PolakRibiere(currentGradient, prevGradient);
-          break;
-        case DirectionMethod::HestenesStiefel:
-          beta = HestenesStiefel(currentGradient, prevGradient, direction);
-          break;
-        case DirectionMethod::DaiYuan:
-          beta = DaiYuan(currentGradient, prevGradient, direction);
-          break;
-      }
-      direction = currentGradient + (beta * direction);
-    }
-
-    alpha = lineSearch(currentValues, direction);
-    prevValues = currentValues;
-    prevError = currentError;
-    currentValues = advance(prevValues, alpha, direction);
-    currentError = error(currentValues);
-
-    // User hook:
-    if (params.iterationHook)
-      params.iterationHook(iteration, prevError, currentError);
-
-    // Maybe show output
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "iteration: " << iteration
-                << ", currentError: " << currentError << std::endl;
-  } while (++iteration < params.maxIterations && !singleIteration &&
-           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-                             params.errorTol, prevError, currentError,
-                             params.verbosity));
-
-  // Printing if verbose
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
-      iteration >= params.maxIterations)
-    std::cout << "nonlinearConjugateGradient: Terminating because reached "
-                 "maximum iterations"
-              << std::endl;
-
-  return {currentValues, iteration};
-}
-
 } /* namespace gtsam */

View File

@@ -26,6 +26,24 @@ namespace gtsam {

 /** An implementation of the nonlinear CG method using the template below */
 class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
     : public NonlinearOptimizer {
+  /* a class for the nonlinearConjugateGradient template */
+  class System {
+   public:
+    typedef Values State;
+    typedef VectorValues Gradient;
+    typedef NonlinearOptimizerParams Parameters;
+
+   protected:
+    const NonlinearFactorGraph &graph_;
+
+   public:
+    System(const NonlinearFactorGraph &graph) : graph_(graph) {}
+    double error(const State &state) const;
+    Gradient gradient(const State &state) const;
+    State advance(const State &current, const double alpha,
+                  const Gradient &g) const;
+  };
+
  public:
   typedef NonlinearOptimizer Base;
   typedef NonlinearOptimizerParams Parameters;
@@ -54,13 +72,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer

   /// Destructor
   ~NonlinearConjugateGradientOptimizer() override {}

-  double error(const Values &state) const;
-
-  VectorValues gradient(const Values &state) const;
-
-  Values advance(const Values &current, const double alpha,
-                 const VectorValues &g) const;
-
   /**
    * Perform a single iteration, returning GaussianFactorGraph corresponding to
    * the linearized factor graph.
@@ -72,22 +83,145 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
    * variable assignments.
    */
   const Values &optimize() override;

-  /** Implement the golden-section line search algorithm */
-  double lineSearch(const Values &currentValues,
-                    const VectorValues &gradient) const;
-
-  /**
-   * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
-   * formula suggested in
-   * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
-   *
-   * The last parameter is a switch between gradient-descent and conjugate
-   * gradient
-   */
-  std::tuple<Values, int> nonlinearConjugateGradient(
-      const Values &initial, const NonlinearOptimizerParams &params,
-      const bool singleIteration, const bool gradientDescent = false) const;
 };

+/** Implement the golden-section line search algorithm */
+template <class S, class V, class W>
+double lineSearch(const S &system, const V currentValues, const W &gradient) {
+  /* normalize it such that it becomes a unit vector */
+  const double g = gradient.norm();
+
+  // perform the golden section search algorithm to decide the the optimal step
+  // size detail refer to http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+               tau = 1e-5;
+  double minStep = -1.0 / g, maxStep = 0,
+         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
+
+  V newValues = system.advance(currentValues, newStep, gradient);
+  double newError = system.error(newValues);
+
+  while (true) {
+    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
+    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                 : newStep - resphi * (newStep - minStep);
+
+    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
+      return 0.5 * (minStep + maxStep);
+    }
+
+    const V testValues = system.advance(currentValues, testStep, gradient);
+    const double testError = system.error(testValues);
+
+    // update the working range
+    if (testError >= newError) {
+      if (flag)
+        maxStep = testStep;
+      else
+        minStep = testStep;
+    } else {
+      if (flag) {
+        minStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      } else {
+        maxStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      }
+    }
+  }
+  return 0.0;
+}
+
+/**
+ * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
+ * formula suggested in
+ * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
+ *
+ * The S (system) class requires three member functions: error(state),
+ * gradient(state) and advance(state, step-size, direction). The V class
+ * denotes the state or the solution.
+ *
+ * The last parameter is a switch between gradient-descent and conjugate
+ * gradient
+ */
+template <class S, class V>
+std::tuple<V, int> nonlinearConjugateGradient(
+    const S &system, const V &initial, const NonlinearOptimizerParams &params,
+    const bool singleIteration, const bool gradientDescent = false) {
+  // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
+
+  size_t iteration = 0;
+
+  // check if we're already close enough
+  double currentError = system.error(initial);
+  if (currentError <= params.errorTol) {
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
+      std::cout << "Exiting, as error = " << currentError << " < "
+                << params.errorTol << std::endl;
+    }
+    return {initial, iteration};
+  }
+
+  V currentValues = initial;
+  typename S::Gradient currentGradient = system.gradient(currentValues),
+                       prevGradient, direction = currentGradient;
+
+  /* do one step of gradient descent */
+  V prevValues = currentValues;
+  double prevError = currentError;
+  double alpha = lineSearch(system, currentValues, direction);
+  currentValues = system.advance(prevValues, alpha, direction);
+  currentError = system.error(currentValues);
+
+  // Maybe show output
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+    std::cout << "Initial error: " << currentError << std::endl;
+
+  // Iterative loop
+  do {
+    if (gradientDescent == true) {
+      direction = system.gradient(currentValues);
+    } else {
+      prevGradient = currentGradient;
+      currentGradient = system.gradient(currentValues);
+
+      // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
+      const double beta =
+          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                            prevGradient.dot(prevGradient));
+      direction = currentGradient + (beta * direction);
+    }
+
+    alpha = lineSearch(system, currentValues, direction);
+    prevValues = currentValues;
+    prevError = currentError;
+    currentValues = system.advance(prevValues, alpha, direction);
+    currentError = system.error(currentValues);
+
+    // User hook:
+    if (params.iterationHook)
+      params.iterationHook(iteration, prevError, currentError);
+
+    // Maybe show output
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      std::cout << "iteration: " << iteration
+                << ", currentError: " << currentError << std::endl;
+  } while (++iteration < params.maxIterations && !singleIteration &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                             params.errorTol, prevError, currentError,
+                             params.verbosity));
+
+  // Printing if verbose
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      iteration >= params.maxIterations)
+    std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                 "maximum iterations"
+              << std::endl;
+
+  return {currentValues, iteration};
+}
+
 } // namespace gtsam

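The nonlinearConjugateGradient template above only requires that S provide error(state), gradient(state), advance(state, alpha, direction), and a Gradient typedef. As an illustration of that concept only (hypothetical code, not part of GTSAM), a system minimizing the quadratic f(x) = 0.5 x'Ax - b'x over Eigen vectors would satisfy it:

    #include <Eigen/Dense>
    #include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

    // Hypothetical System: f(x) = 0.5 x'Ax - b'x with gradient Ax - b.
    struct QuadraticSystem {
      typedef Eigen::VectorXd State;
      typedef Eigen::VectorXd Gradient;

      Eigen::MatrixXd A;  // assumed symmetric positive definite
      Eigen::VectorXd b;

      double error(const State& x) const { return 0.5 * x.dot(A * x) - b.dot(x); }
      Gradient gradient(const State& x) const { return A * x - b; }
      State advance(const State& current, const double alpha,
                    const Gradient& g) const {
        // On a vector space, "retract" is plain addition.
        return current + alpha * g;
      }
    };

    // Usage sketch, assuming A, b, and an initial guess x0 are defined:
    //   gtsam::NonlinearOptimizerParams params;
    //   const auto [xOpt, iters] = gtsam::nonlinearConjugateGradient(
    //       QuadraticSystem{A, b}, x0, params, false /* run to convergence */);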
View File

@@ -69,7 +69,7 @@ class TestScenario(GtsamTestCase):
         lmParams = LevenbergMarquardtParams.CeresDefaults()
         lmParams.setLinearSolverType("ITERATIVE")
         cgParams = PCGSolverParameters()
-        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
+        cgParams.preconditioner = DummyPreconditionerParameters()
         lmParams.setIterativeParams(cgParams)
         actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
         self.assertAlmostEqual(0, self.fg.error(actual))

View File

@@ -95,9 +95,9 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint )
   VectorValues zeros = config.zeroVectors();

   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);

   VectorValues expected;
@@ -122,9 +122,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
   VectorValues zeros = config.zeroVectors();

   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);

   VectorValues expected;

View File

@@ -54,23 +54,23 @@ TEST( PCGsolver, verySimpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::DummyPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7));
   //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   // It takes more than 1000 iterations for this test
-  pcg->setMaxIterations(1500);
+  pcg->maxIterations = 1500;
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5));
@@ -107,21 +107,21 @@ TEST(PCGSolver, simpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::DummyPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
   //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
-  pcg->setPreconditionerParams(
-      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>());
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
   //deltaPCGJacobi.print("PCG Jacobi");

View File

@@ -48,7 +48,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) {
 TEST( SubgraphSolver, Parameters )
 {
   LONGS_EQUAL(SubgraphSolverParameters::SILENT, kParameters.verbosity());
-  LONGS_EQUAL(500, kParameters.maxIterations());
+  LONGS_EQUAL(500, kParameters.maxIterations);
 }

 /* ************************************************************************* */
/* ************************************************************************* */

View File

@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
   // params.setVerbosityLM("SUMMARY");
   // params.linearSolverType = LevenbergMarquardtParams::Iterative;
   // auto pcg = std::make_shared<PCGSolverParameters>();
-  // pcg->preconditioner_ =
+  // pcg->preconditioner =
   //     std::make_shared<SubgraphPreconditionerParameters>();
   //     std::make_shared<BlockJacobiPreconditionerParameters>();
   // params.iterativeParams = pcg;