Merge pull request #1876 from borglab/conjugate-gradient
commit dc24255418
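Summary: this change converts ConjugateGradientParameters and PCGSolverParameters from getter/setter accessors over trailing-underscore members (minIterations_, epsilon_rel_, preconditioner_, ...) to plain public fields (minIterations, epsilon_rel, preconditioner, ...). The old getX()/setX() shims survive behind GTSAM_ALLOW_DEPRECATED_SINCE_V43, PCGSolverParameters gains a constructor that takes the preconditioner parameters directly (replacing setPreconditionerParams), and the touched files are reflowed to clang-format style. Call sites in the examples, tests, the wrapper interface, and ShonanAveraging are updated to match.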
@@ -94,11 +94,10 @@ int main(int argc, char* argv[]) {
   parameters.maxIterations = 500;
   PCGSolverParameters::shared_ptr pcg =
       std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner = std::make_shared<BlockJacobiPreconditionerParameters>();
   // Following is crucial:
-  pcg->setEpsilon_abs(1e-10);
-  pcg->setEpsilon_rel(1e-10);
+  pcg->epsilon_abs = 1e-10;
+  pcg->epsilon_rel = 1e-10;
   parameters.iterativeParams = pcg;

   LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
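Put together, the new usage pattern for running Levenberg-Marquardt on top of PCG looks like the following sketch, assembled from the hunks in this PR (graph and initialEstimate stand in for an existing NonlinearFactorGraph and Values):

    LevenbergMarquardtParams parameters;
    parameters.linearSolverType = LevenbergMarquardtParams::Iterative;
    // The preconditioner is now passed to the constructor or set as a field.
    auto pcg = std::make_shared<PCGSolverParameters>(
        std::make_shared<BlockJacobiPreconditionerParameters>());
    pcg->epsilon_abs = 1e-10;  // was pcg->setEpsilon_abs(1e-10)
    pcg->epsilon_rel = 1e-10;  // was pcg->setEpsilon_rel(1e-10)
    parameters.iterativeParams = pcg;
    LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
    Values result = optimizer.optimize();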
@@ -28,11 +28,11 @@ namespace gtsam {
 void ConjugateGradientParameters::print(ostream &os) const {
   Base::print(os);
   cout << "ConjugateGradientParameters" << endl
-       << "minIter: " << minIterations_ << endl
-       << "maxIter: " << maxIterations_ << endl
-       << "resetIter: " << reset_ << endl
-       << "eps_rel: " << epsilon_rel_ << endl
-       << "eps_abs: " << epsilon_abs_ << endl;
+       << "minIter: " << minIterations << endl
+       << "maxIter: " << maxIterations << endl
+       << "resetIter: " << reset << endl
+       << "eps_rel: " << epsilon_rel << endl
+       << "eps_abs: " << epsilon_abs << endl;
 }

 /*****************************************************************************/
@@ -24,59 +24,66 @@
 namespace gtsam {

 /**
- * parameters for the conjugate gradient method
+ * Parameters for the Conjugate Gradient method
  */
-class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
-
-public:
+struct GTSAM_EXPORT ConjugateGradientParameters
+    : public IterativeOptimizationParameters {
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;

-  size_t minIterations_;  ///< minimum number of cg iterations
-  size_t maxIterations_;  ///< maximum number of cg iterations
-  size_t reset_;          ///< number of iterations before reset
-  double epsilon_rel_;    ///< threshold for relative error decrease
-  double epsilon_abs_;    ///< threshold for absolute error decrease
+  size_t minIterations;  ///< minimum number of cg iterations
+  size_t maxIterations;  ///< maximum number of cg iterations
+  size_t reset;          ///< number of iterations before reset
+  double epsilon_rel;    ///< threshold for relative error decrease
+  double epsilon_abs;    ///< threshold for absolute error decrease

   /* Matrix Operation Kernel */
   enum BLASKernel {
     GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
-  } blas_kernel_;
+  } blas_kernel;

   ConjugateGradientParameters()
-      : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3),
-        epsilon_abs_(1e-3), blas_kernel_(GTSAM) {}
+      : minIterations(1),
+        maxIterations(500),
+        reset(501),
+        epsilon_rel(1e-3),
+        epsilon_abs(1e-3),
+        blas_kernel(GTSAM) {}

-  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-      double epsilon_rel, double epsilon_abs, BLASKernel blas)
-      : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-        epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}
+  ConjugateGradientParameters(size_t minIterations, size_t maxIterations,
+                              size_t reset, double epsilon_rel,
+                              double epsilon_abs, BLASKernel blas)
+      : minIterations(minIterations),
+        maxIterations(maxIterations),
+        reset(reset),
+        epsilon_rel(epsilon_rel),
+        epsilon_abs(epsilon_abs),
+        blas_kernel(blas) {}

   ConjugateGradientParameters(const ConjugateGradientParameters &p)
-      : Base(p), minIterations_(p.minIterations_), maxIterations_(p.maxIterations_), reset_(p.reset_),
-        epsilon_rel_(p.epsilon_rel_), epsilon_abs_(p.epsilon_abs_), blas_kernel_(GTSAM) {}
+      : Base(p),
+        minIterations(p.minIterations),
+        maxIterations(p.maxIterations),
+        reset(p.reset),
+        epsilon_rel(p.epsilon_rel),
+        epsilon_abs(p.epsilon_abs),
+        blas_kernel(GTSAM) {}

-  /* general interface */
-  inline size_t minIterations() const { return minIterations_; }
-  inline size_t maxIterations() const { return maxIterations_; }
-  inline size_t reset() const { return reset_; }
-  inline double epsilon() const { return epsilon_rel_; }
-  inline double epsilon_rel() const { return epsilon_rel_; }
-  inline double epsilon_abs() const { return epsilon_abs_; }
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43
+  inline size_t getMinIterations() const { return minIterations; }
+  inline size_t getMaxIterations() const { return maxIterations; }
+  inline size_t getReset() const { return reset; }
+  inline double getEpsilon() const { return epsilon_rel; }
+  inline double getEpsilon_rel() const { return epsilon_rel; }
+  inline double getEpsilon_abs() const { return epsilon_abs; }

-  inline size_t getMinIterations() const { return minIterations_; }
-  inline size_t getMaxIterations() const { return maxIterations_; }
-  inline size_t getReset() const { return reset_; }
-  inline double getEpsilon() const { return epsilon_rel_; }
-  inline double getEpsilon_rel() const { return epsilon_rel_; }
-  inline double getEpsilon_abs() const { return epsilon_abs_; }
-
-  inline void setMinIterations(size_t value) { minIterations_ = value; }
-  inline void setMaxIterations(size_t value) { maxIterations_ = value; }
-  inline void setReset(size_t value) { reset_ = value; }
-  inline void setEpsilon(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_rel(double value) { epsilon_rel_ = value; }
-  inline void setEpsilon_abs(double value) { epsilon_abs_ = value; }
+  inline void setMinIterations(size_t value) { minIterations = value; }
+  inline void setMaxIterations(size_t value) { maxIterations = value; }
+  inline void setReset(size_t value) { reset = value; }
+  inline void setEpsilon(double value) { epsilon_rel = value; }
+  inline void setEpsilon_rel(double value) { epsilon_rel = value; }
+  inline void setEpsilon_abs(double value) { epsilon_abs = value; }
+#endif

   void print() const { Base::print(); }
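The migration burden on user code is mechanical, and with GTSAM_ALLOW_DEPRECATED_SINCE_V43 defined the old call sites keep compiling through the shims above. A minimal before/after sketch:

    ConjugateGradientParameters cg;
    // Before: accessors over trailing-underscore members
    //   cg.setMaxIterations(100);
    //   size_t n = cg.getMaxIterations();
    // After: plain public fields
    cg.maxIterations = 100;
    size_t n = cg.maxIterations;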
@@ -109,16 +116,17 @@ V preconditionedConjugateGradient(const S &system, const V &initial,

   double currentGamma = system.dot(residual, residual), prevGamma, alpha, beta;

-  const size_t iMaxIterations = parameters.maxIterations(),
-               iMinIterations = parameters.minIterations(),
-               iReset = parameters.reset() ;
-  const double threshold = std::max(parameters.epsilon_abs(),
-      parameters.epsilon() * parameters.epsilon() * currentGamma);
+  const size_t iMaxIterations = parameters.maxIterations,
+               iMinIterations = parameters.minIterations,
+               iReset = parameters.reset;
+  const double threshold =
+      std::max(parameters.epsilon_abs,
+               parameters.epsilon_rel * parameters.epsilon_rel * currentGamma);

-  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY )
-    std::cout << "[PCG] epsilon = " << parameters.epsilon()
-              << ", max = " << parameters.maxIterations()
-              << ", reset = " << parameters.reset()
+  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY)
+    std::cout << "[PCG] epsilon = " << parameters.epsilon_rel
+              << ", max = " << parameters.maxIterations
+              << ", reset = " << parameters.reset
               << ", ||r0||^2 = " << currentGamma
               << ", threshold = " << threshold << std::endl;
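Note the stopping rule this hunk makes explicit: currentGamma starts as ||r0||^2, the squared norm of the initial residual, so the solver terminates once the squared residual falls below threshold = max(epsilon_abs, epsilon_rel^2 * ||r0||^2). epsilon_abs is an absolute floor, while epsilon_rel is measured relative to the initial residual, which is why the relative tolerance enters squared.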
@@ -34,17 +34,13 @@ namespace gtsam {
 void PCGSolverParameters::print(ostream &os) const {
   Base::print(os);
   os << "PCGSolverParameters:" << endl;
-  preconditioner_->print(os);
+  preconditioner->print(os);
 }

 /*****************************************************************************/
 PCGSolver::PCGSolver(const PCGSolverParameters &p) {
   parameters_ = p;
-  preconditioner_ = createPreconditioner(p.preconditioner_);
-}
-
-void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner) {
-  preconditioner_ = preconditioner;
+  preconditioner_ = createPreconditioner(p.preconditioner);
 }

 void PCGSolverParameters::print(const std::string &s) const {
@@ -31,29 +31,22 @@ class VectorValues;
 struct PreconditionerParameters;

 /**
- * Parameters for PCG
+ * Parameters for Preconditioned Conjugate Gradient solver.
  */
-struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
-public:
+struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters {
   typedef ConjugateGradientParameters Base;
   typedef std::shared_ptr<PCGSolverParameters> shared_ptr;

-  PCGSolverParameters() {
-  }
+  std::shared_ptr<PreconditionerParameters> preconditioner;
+
+  PCGSolverParameters() {}
+
+  PCGSolverParameters(
+      const std::shared_ptr<PreconditionerParameters> &preconditioner)
+      : preconditioner(preconditioner) {}

   void print(std::ostream &os) const override;

-  /* interface to preconditioner parameters */
-  inline const PreconditionerParameters& preconditioner() const {
-    return *preconditioner_;
-  }
-
   // needed for python wrapper
   void print(const std::string &s) const;
-
-  std::shared_ptr<PreconditionerParameters> preconditioner_;
-
-  void setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner);
 };

 /**
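Both construction styles the new header supports, as a short sketch (DummyPreconditionerParameters and BlockJacobiPreconditionerParameters are the preconditioner types exercised by the tests further down):

    // One-shot, via the new converting constructor:
    auto pcg = std::make_shared<PCGSolverParameters>(
        std::make_shared<DummyPreconditionerParameters>());
    // Or by assigning the now-public field, replacing setPreconditionerParams:
    pcg->preconditioner =
        std::make_shared<BlockJacobiPreconditionerParameters>();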
@@ -87,16 +80,16 @@ public:
  * System class needed for calling preconditionedConjugateGradient
  */
 class GTSAM_EXPORT GaussianFactorGraphSystem {
-public:
-
-  GaussianFactorGraphSystem(const GaussianFactorGraph &gfg,
-      const Preconditioner &preconditioner, const KeyInfo &info,
-      const std::map<Key, Vector> &lambda);
-
   const GaussianFactorGraph &gfg_;
   const Preconditioner &preconditioner_;
-  const KeyInfo &keyInfo_;
-  const std::map<Key, Vector> &lambda_;
+  KeyInfo keyInfo_;
+  std::map<Key, Vector> lambda_;
+
+ public:
+  GaussianFactorGraphSystem(const GaussianFactorGraph &gfg,
+                            const Preconditioner &preconditioner,
+                            const KeyInfo &info,
+                            const std::map<Key, Vector> &lambda);

   void residual(const Vector &x, Vector &r) const;
   void multiply(const Vector &x, Vector& y) const;
@@ -49,10 +49,12 @@ namespace gtsam {

       // init gamma and calculate threshold
       gamma = dot(g,g);
-      threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
+      threshold =
+          std::max(parameters_.epsilon_abs,
+                   parameters_.epsilon_rel * parameters_.epsilon_rel * gamma);

       // Allocate and calculate A*d for first iteration
-      if (gamma > parameters_.epsilon_abs()) Ad = Ab * d;
+      if (gamma > parameters_.epsilon_abs) Ad = Ab * d;
     }

   /* ************************************************************************* */
@@ -79,13 +81,13 @@ namespace gtsam {
     // take a step, return true if converged
     bool step(const S& Ab, V& x) {

-      if ((++k) >= ((int)parameters_.maxIterations())) return true;
+      if ((++k) >= ((int)parameters_.maxIterations)) return true;

       //---------------------------------->
       double alpha = takeOptimalStep(x);

       // update gradient (or re-calculate at reset time)
-      if (k % parameters_.reset() == 0) g = Ab.gradient(x);
+      if (k % parameters_.reset == 0) g = Ab.gradient(x);
       // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
       else Ab.transposeMultiplyAdd(alpha, Ad, g);
@@ -126,11 +128,10 @@ namespace gtsam {
     CGState<S, V, E> state(Ab, x, parameters, steepest);

     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
-      std::cout << "CG: epsilon = " << parameters.epsilon()
-                << ", maxIterations = " << parameters.maxIterations()
+      std::cout << "CG: epsilon = " << parameters.epsilon_rel
+                << ", maxIterations = " << parameters.maxIterations
                 << ", ||g0||^2 = " << state.gamma
-                << ", threshold = " << state.threshold
-                << std::endl;
+                << ", threshold = " << state.threshold << std::endl;

     if ( state.gamma < state.threshold ) {
       if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
@@ -710,17 +710,11 @@ virtual class IterativeOptimizationParameters {
 #include <gtsam/linear/ConjugateGradientSolver.h>
 virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   ConjugateGradientParameters();
-  int getMinIterations() const ;
-  int getMaxIterations() const ;
-  int getReset() const;
-  double getEpsilon_rel() const;
-  double getEpsilon_abs() const;
-
-  void setMinIterations(int value);
-  void setMaxIterations(int value);
-  void setReset(int value);
-  void setEpsilon_rel(double value);
-  void setEpsilon_abs(double value);
+  int minIterations;
+  int maxIterations;
+  int reset;
+  double epsilon_rel;
+  double epsilon_abs;
 };

 #include <gtsam/linear/Preconditioner.h>
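Since the wrapper interface now declares the members as plain attributes, bound languages read and write them directly; the Python test further down accordingly assigns cgParams.preconditioner instead of calling setPreconditionerParams.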
@@ -739,8 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParamet
 #include <gtsam/linear/PCGSolver.h>
 virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
   PCGSolverParameters();
+  PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner);
   void print(string s = "");
-  void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner);
+
+  gtsam::PreconditionerParameters* preconditioner;
 };

 #include <gtsam/linear/SubgraphSolver.h>
@@ -16,11 +16,11 @@
  * @date Jun 11, 2012
  */

-#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
-#include <gtsam/nonlinear/internal/NonlinearOptimizerState.h>
-#include <gtsam/nonlinear/Values.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/VectorValues.h>
+#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
+#include <gtsam/nonlinear/Values.h>
+#include <gtsam/nonlinear/internal/NonlinearOptimizerState.h>

 #include <cmath>
@@ -34,29 +34,35 @@ typedef internal::NonlinearOptimizerState State;
  * @param values a linearization point
  * Can be moved to NonlinearFactorGraph.h if desired
  */
-static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg,
-    const Values &values) {
+static VectorValues gradientInPlace(const NonlinearFactorGraph& nfg,
+                                    const Values& values) {
   // Linearize graph
   GaussianFactorGraph::shared_ptr linear = nfg.linearize(values);
   return linear->gradientAtZero();
 }

 NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
-    const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params)
-    : Base(graph, std::unique_ptr<State>(new State(initialValues, graph.error(initialValues)))),
+    const NonlinearFactorGraph& graph, const Values& initialValues,
+    const Parameters& params)
+    : Base(graph, std::unique_ptr<State>(
+                      new State(initialValues, graph.error(initialValues)))),
       params_(params) {}

-double NonlinearConjugateGradientOptimizer::System::error(const State& state) const {
+double NonlinearConjugateGradientOptimizer::System::error(
+    const State& state) const {
   return graph_.error(state);
 }

-NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(
-    const State &state) const {
+NonlinearConjugateGradientOptimizer::System::Gradient
+NonlinearConjugateGradientOptimizer::System::gradient(
+    const State& state) const {
   return gradientInPlace(graph_, state);
 }

-NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(
-    const State &current, const double alpha, const Gradient &g) const {
+NonlinearConjugateGradientOptimizer::System::State
+NonlinearConjugateGradientOptimizer::System::advance(const State& current,
+                                                     const double alpha,
+                                                     const Gradient& g) const {
   Gradient step = g;
   step *= alpha;
   return current.retract(step);
@@ -65,7 +71,8 @@ NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOpt
 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
   const auto [newValues, dummy] = nonlinearConjugateGradient<System, Values>(
       System(graph_), state_->values, params_, true /* single iteration */);
-  state_.reset(new State(newValues, graph_.error(newValues), state_->iterations + 1));
+  state_.reset(
+      new State(newValues, graph_.error(newValues), state_->iterations + 1));

   // NOTE(frank): We don't linearize this system, so we must return null here.
   return nullptr;
@@ -76,9 +83,9 @@ const Values& NonlinearConjugateGradientOptimizer::optimize() {
   System system(graph_);
   const auto [newValues, iterations] =
       nonlinearConjugateGradient(system, state_->values, params_, false);
-  state_.reset(new State(std::move(newValues), graph_.error(newValues), iterations));
+  state_.reset(
+      new State(std::move(newValues), graph_.error(newValues), iterations));
   return state_->values;
 }

 } /* namespace gtsam */
@@ -24,8 +24,8 @@
 namespace gtsam {

 /** An implementation of the nonlinear CG method using the template below */
-class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer {
-
+class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
+    : public NonlinearOptimizer {
   /* a class for the nonlinearConjugateGradient template */
   class System {
    public:
|
|||
const NonlinearFactorGraph &graph_;
|
||||
|
||||
public:
|
||||
System(const NonlinearFactorGraph &graph) :
|
||||
graph_(graph) {
|
||||
}
|
||||
System(const NonlinearFactorGraph &graph) : graph_(graph) {}
|
||||
double error(const State &state) const;
|
||||
Gradient gradient(const State &state) const;
|
||||
State advance(const State ¤t, const double alpha,
|
||||
const Gradient &g) const;
|
||||
};
|
||||
|
||||
public:
|
||||
|
||||
public:
|
||||
typedef NonlinearOptimizer Base;
|
||||
typedef NonlinearOptimizerParams Parameters;
|
||||
typedef std::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;
|
||||
|
||||
protected:
|
||||
protected:
|
||||
Parameters params_;
|
||||
|
||||
const NonlinearOptimizerParams& _params() const override {
|
||||
return params_;
|
||||
}
|
||||
|
||||
public:
|
||||
const NonlinearOptimizerParams &_params() const override { return params_; }
|
||||
|
||||
public:
|
||||
/// Constructor
|
||||
NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph,
|
||||
const Values& initialValues, const Parameters& params = Parameters());
|
||||
NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph &graph,
|
||||
const Values &initialValues,
|
||||
const Parameters ¶ms = Parameters());
|
||||
|
||||
/// Destructor
|
||||
~NonlinearConjugateGradientOptimizer() override {
|
||||
}
|
||||
~NonlinearConjugateGradientOptimizer() override {}
|
||||
|
||||
/**
|
||||
* Perform a single iteration, returning GaussianFactorGraph corresponding to
|
||||
|
@@ -79,34 +73,31 @@ public:
    * Optimize for the maximum-likelihood estimate, returning the optimized
    * variable assignments.
    */
-  const Values& optimize() override;
+  const Values &optimize() override;
 };

 /** Implement the golden-section line search algorithm */
-template<class S, class V, class W>
+template <class S, class V, class W>
 double lineSearch(const S &system, const V currentValues, const W &gradient) {

   /* normalize it such that it becomes a unit vector */
   const double g = gradient.norm();

-  // perform the golden section search algorithm to decide the the optimal step size
-  // detail refer to http://en.wikipedia.org/wiki/Golden_section_search
-  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, tau =
-      1e-5;
-  double minStep = -1.0 / g, maxStep = 0, newStep = minStep
-      + (maxStep - minStep) / (phi + 1.0);
+  // perform the golden section search algorithm to decide the optimal step
+  // size; for details refer to http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+               tau = 1e-5;
+  double minStep = -1.0 / g, maxStep = 0,
+         newStep = minStep + (maxStep - minStep) / (phi + 1.0);

   V newValues = system.advance(currentValues, newStep, gradient);
   double newError = system.error(newValues);

   while (true) {
     const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-    const double testStep =
-        flag ? newStep + resphi * (maxStep - newStep) :
-            newStep - resphi * (newStep - minStep);
+    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                 : newStep - resphi * (newStep - minStep);

-    if ((maxStep - minStep)
-        < tau * (std::abs(testStep) + std::abs(newStep))) {
+    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
       return 0.5 * (minStep + maxStep);
     }
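For reference on the reflowed constants above: phi = (1 + sqrt(5))/2 is the golden ratio and resphi = 2 - phi ≈ 0.382, so each probe splits the current bracket [minStep, maxStep] at the golden section. The loop returns the bracket midpoint once the bracket width drops below tau times the magnitude of the candidate steps.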
@@ -135,19 +126,21 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
 }

 /**
- * Implement the nonlinear conjugate gradient method using the Polak-Ribiere formula suggested in
+ * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
+ * formula suggested in
  * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
  *
- * The S (system) class requires three member functions: error(state), gradient(state) and
- * advance(state, step-size, direction). The V class denotes the state or the solution.
+ * The S (system) class requires three member functions: error(state),
+ * gradient(state) and advance(state, step-size, direction). The V class denotes
+ * the state or the solution.
  *
- * The last parameter is a switch between gradient-descent and conjugate gradient
+ * The last parameter is a switch between gradient-descent and conjugate
+ * gradient
  */
-template<class S, class V>
-std::tuple<V, int> nonlinearConjugateGradient(const S &system,
-    const V &initial, const NonlinearOptimizerParams &params,
+template <class S, class V>
+std::tuple<V, int> nonlinearConjugateGradient(
+    const S &system, const V &initial, const NonlinearOptimizerParams &params,
     const bool singleIteration, const bool gradientDescent = false) {

   // GTSAM_CONCEPT_MANIFOLD_TYPE(V)

   size_t iteration = 0;
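The S concept documented above is deliberately small. As a hypothetical illustration (not part of this PR; the in-tree System in NonlinearConjugateGradientOptimizer.h wraps a NonlinearFactorGraph), a conforming system for the quadratic objective 0.5 * ||x - target||^2 over plain vectors could look like:

    struct QuadraticSystem {
      typedef gtsam::Vector Gradient;  // gradient type used by the template
      gtsam::Vector target;            // minimum of the objective
      double error(const gtsam::Vector& x) const {
        return 0.5 * (x - target).squaredNorm();
      }
      gtsam::Vector gradient(const gtsam::Vector& x) const {
        return x - target;  // gradient of the objective at x
      }
      gtsam::Vector advance(const gtsam::Vector& x, const double alpha,
                            const gtsam::Vector& g) const {
        return x + alpha * g;  // the line search supplies negative alpha
      }
    };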
@@ -185,9 +178,9 @@ std::tuple<V, int> nonlinearConjugateGradient(const S &system,
       prevGradient = currentGradient;
       currentGradient = system.gradient(currentValues);
       // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
-      const double beta = std::max(0.0,
-          currentGradient.dot(currentGradient - prevGradient)
-              / prevGradient.dot(prevGradient));
+      const double beta =
+          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                            prevGradient.dot(prevGradient));
       direction = currentGradient + (beta * direction);
     }
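Spelled out, this is the Polak-Ribiere coefficient beta = max(0, g_n . (g_n - g_{n-1}) / (g_{n-1} . g_{n-1})); clamping at zero is the standard automatic reset that falls back to steepest descent whenever the conjugacy term turns negative.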
@@ -205,20 +198,21 @@ std::tuple<V, int> nonlinearConjugateGradient(const S &system,

     // Maybe show output
     if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl;
-  } while (++iteration < params.maxIterations && !singleIteration
-      && !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-          params.errorTol, prevError, currentError, params.verbosity));
+      std::cout << "iteration: " << iteration
+                << ", currentError: " << currentError << std::endl;
+  } while (++iteration < params.maxIterations && !singleIteration &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                             params.errorTol, prevError, currentError,
+                             params.verbosity));

   // Printing if verbose
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR
-      && iteration >= params.maxIterations)
-    std::cout
-        << "nonlinearConjugateGradient: Terminating because reached maximum iterations"
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      iteration >= params.maxIterations)
+    std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                 "maximum iterations"
               << std::endl;

   return {currentValues, iteration};
 }

-} // \ namespace gtsam
+}  // namespace gtsam
@@ -1,56 +1,52 @@
 /**
- * @file NonlinearConjugateGradientOptimizer.cpp
- * @brief Test simple CG optimizer
+ * @file testNonlinearConjugateGradientOptimizer.cpp
+ * @brief Test nonlinear CG optimizer
  * @author Yong-Dian Jian
+ * @author Varun Agrawal
  * @date June 11, 2012
  */

-/**
- * @file testGradientDescentOptimizer.cpp
- * @brief Small test of NonlinearConjugateGradientOptimizer
- * @author Yong-Dian Jian
- * @date Jun 11, 2012
- */
-
-#include <gtsam/slam/BetweenFactor.h>
+#include <CppUnitLite/TestHarness.h>
+#include <gtsam/geometry/Pose2.h>
 #include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
 #include <gtsam/nonlinear/Values.h>
-#include <gtsam/geometry/Pose2.h>
-
-#include <CppUnitLite/TestHarness.h>
-
+#include <gtsam/slam/BetweenFactor.h>

 using namespace std;
 using namespace gtsam;

 // Generate a small PoseSLAM problem
 std::tuple<NonlinearFactorGraph, Values> generateProblem() {
-
   // 1. Create graph container and add factors to it
   NonlinearFactorGraph graph;

   // 2a. Add Gaussian prior
   Pose2 priorMean(0.0, 0.0, 0.0);  // prior at origin
-  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
-      Vector3(0.3, 0.3, 0.1));
+  SharedDiagonal priorNoise =
+      noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
   graph.addPrior(1, priorMean, priorNoise);

   // 2b. Add odometry factors
-  SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas(
-      Vector3(0.2, 0.2, 0.1));
-  graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise);
-  graph.emplace_shared<BetweenFactor<Pose2>>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
-  graph.emplace_shared<BetweenFactor<Pose2>>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
-  graph.emplace_shared<BetweenFactor<Pose2>>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise);
+  SharedDiagonal odometryNoise =
+      noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
+  graph.emplace_shared<BetweenFactor<Pose2>>(1, 2, Pose2(2.0, 0.0, 0.0),
+                                             odometryNoise);
+  graph.emplace_shared<BetweenFactor<Pose2>>(2, 3, Pose2(2.0, 0.0, M_PI_2),
+                                             odometryNoise);
+  graph.emplace_shared<BetweenFactor<Pose2>>(3, 4, Pose2(2.0, 0.0, M_PI_2),
+                                             odometryNoise);
+  graph.emplace_shared<BetweenFactor<Pose2>>(4, 5, Pose2(2.0, 0.0, M_PI_2),
+                                             odometryNoise);

   // 2c. Add pose constraint
-  SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas(
-      Vector3(0.2, 0.2, 0.1));
+  SharedDiagonal constraintUncertainty =
+      noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
   graph.emplace_shared<BetweenFactor<Pose2>>(5, 2, Pose2(2.0, 0.0, M_PI_2),
                                              constraintUncertainty);

-  // 3. Create the data structure to hold the initialEstimate estimate to the solution
+  // 3. Create the data structure to hold the initialEstimate estimate to the
+  // solution
   Values initialEstimate;
   Pose2 x1(0.5, 0.0, 0.2);
   initialEstimate.insert(1, x1);
@@ -68,16 +64,17 @@ std::tuple<NonlinearFactorGraph, Values> generateProblem() {

 /* ************************************************************************* */
 TEST(NonlinearConjugateGradientOptimizer, Optimize) {
-  const auto [graph, initialEstimate] = generateProblem();
-  // cout << "initial error = " << graph.error(initialEstimate) << endl;
+  const auto [graph, initialEstimate] = generateProblem();
+  // cout << "initial error = " << graph.error(initialEstimate) << endl;

   NonlinearOptimizerParams param;
-  param.maxIterations = 500; /* requires a larger number of iterations to converge */
+  param.maxIterations =
+      500; /* requires a larger number of iterations to converge */
   param.verbosity = NonlinearOptimizerParams::SILENT;

   NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param);
   Values result = optimizer.optimize();
-  // cout << "cg final error = " << graph.error(result) << endl;
+  // cout << "cg final error = " << graph.error(result) << endl;

   EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
 }
@@ -67,20 +67,15 @@ ShonanAveragingParameters<d>::ShonanAveragingParameters(
   builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
   builderParameters.augmentationFactor = 0.0;

-  auto pcg = std::make_shared<PCGSolverParameters>();
-
   // Choose optimization method
   if (method == "SUBGRAPH") {
     lm.iterativeParams =
         std::make_shared<SubgraphSolverParameters>(builderParameters);
   } else if (method == "SGPC") {
-    pcg->preconditioner_ =
-        std::make_shared<SubgraphPreconditionerParameters>(builderParameters);
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
+        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
   } else if (method == "JACOBI") {
-    pcg->preconditioner_ =
-        std::make_shared<BlockJacobiPreconditionerParameters>();
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(std::make_shared<BlockJacobiPreconditionerParameters>());
   } else if (method == "QR") {
     lm.setLinearSolverType("MULTIFRONTAL_QR");
   } else if (method == "CHOLESKY") {
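With the preconditioner now passed to the constructor, each branch builds its complete PCGSolverParameters in a single expression rather than mutating a shared pcg object declared before the dispatch, which is why the earlier "auto pcg = ..." line disappears.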
@@ -69,7 +69,7 @@ class TestScenario(GtsamTestCase):
         lmParams = LevenbergMarquardtParams.CeresDefaults()
         lmParams.setLinearSolverType("ITERATIVE")
         cgParams = PCGSolverParameters()
-        cgParams.setPreconditionerParams(DummyPreconditionerParameters())
+        cgParams.preconditioner = DummyPreconditionerParameters()
         lmParams.setIterativeParams(cgParams)
         actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
         self.assertAlmostEqual(0, self.fg.error(actual))
@@ -95,9 +95,9 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint )
   VectorValues zeros = config.zeroVectors();

   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);

   VectorValues expected;
@@ -122,9 +122,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
   VectorValues zeros = config.zeroVectors();

   ConjugateGradientParameters parameters;
-  parameters.setEpsilon_abs(1e-3);
-  parameters.setEpsilon_rel(1e-5);
-  parameters.setMaxIterations(100);
+  parameters.epsilon_abs = 1e-3;
+  parameters.epsilon_rel = 1e-5;
+  parameters.maxIterations = 100;
   VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters);

   VectorValues expected;
@@ -125,8 +125,8 @@ TEST( GaussianFactorGraphSystem, multiply_getb)
 TEST(PCGSolver, dummy) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<DummyPreconditionerParameters>());
   params.iterativeParams = pcg;

   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -145,9 +145,8 @@ TEST(PCGSolver, dummy) {
 TEST(PCGSolver, blockjacobi) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   params.iterativeParams = pcg;

   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -166,8 +165,8 @@ TEST(PCGSolver, blockjacobi) {
 TEST(PCGSolver, subgraph) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<SubgraphPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<SubgraphPreconditionerParameters>());
   params.iterativeParams = pcg;

   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -54,21 +54,23 @@ TEST( PCGsolver, verySimpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::DummyPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7));
   //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   // It takes more than 1000 iterations for this test
-  pcg->setMaxIterations(1500);
+  pcg->maxIterations = 1500;
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);

   EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5));
@@ -105,19 +107,21 @@ TEST(PCGSolver, simpleLinearSystem) {
   // Solve the system using Preconditioned Conjugate Gradient solver
   // Common PCG parameters
   gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared<gtsam::PCGSolverParameters>();
-  pcg->setMaxIterations(500);
-  pcg->setEpsilon_abs(0.0);
-  pcg->setEpsilon_rel(0.0);
+  pcg->maxIterations = 500;
+  pcg->epsilon_abs = 0.0;
+  pcg->epsilon_rel = 0.0;
   //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::DummyPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::DummyPreconditionerParameters>();
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
   //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
+  pcg->preconditioner =
+      std::make_shared<gtsam::BlockJacobiPreconditionerParameters>();
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
   //deltaPCGJacobi.print("PCG Jacobi");
@@ -48,7 +48,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) {
 TEST( SubgraphSolver, Parameters )
 {
   LONGS_EQUAL(SubgraphSolverParameters::SILENT, kParameters.verbosity());
-  LONGS_EQUAL(500, kParameters.maxIterations());
+  LONGS_EQUAL(500, kParameters.maxIterations);
 }

 /* ************************************************************************* */
@@ -83,7 +83,7 @@ int main(int argc, char* argv[]) {
   // params.setVerbosityLM("SUMMARY");
   // params.linearSolverType = LevenbergMarquardtParams::Iterative;
   // auto pcg = std::make_shared<PCGSolverParameters>();
-  // pcg->preconditioner_ =
+  // pcg->preconditioner =
   //     std::make_shared<SubgraphPreconditionerParameters>();
   //     std::make_shared<BlockJacobiPreconditionerParameters>();
   // params.iterativeParams = pcg;