Changes in progress
parent 88f5e3d9b6
commit 75bd1689df
DoglegOptimizer.cpp
@@ -27,11 +27,11 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-void DoglegOptimizer::iterate(void) const {
+void DoglegOptimizer::iterate(void) {
 
   // Linearize graph
   const Ordering& ordering = *params_.ordering;
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, ordering);
 
   // Check whether to use QR
   bool useQR;
@@ -43,18 +43,18 @@ void DoglegOptimizer::iterate(void) const {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::factorization");
 
   // Pull out parameters we'll use
-  const bool dlVerbose = (params_->dlVerbosity > DoglegParams::SILENT);
+  const bool dlVerbose = (params_.dlVerbosity > DoglegParams::SILENT);
 
   // Do Dogleg iteration with either Multifrontal or Sequential elimination
   DoglegOptimizerImpl::IterationResult result;
 
   if(params_.elimination == DoglegParams::MULTIFRONTAL) {
     GaussianBayesTree::shared_ptr bt = GaussianMultifrontalSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bt, graph_, state_.values, ordering, state_.error, dlVerbose);
 
   } else if(params_.elimination == DoglegParams::SEQUENTIAL) {
     GaussianBayesNet::shared_ptr bn = GaussianSequentialSolver(*linear, useQR).eliminate();
-    result = DoglegOptimizerImpl::Iterate(state_.delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);
+    result = DoglegOptimizerImpl::Iterate(state_.Delta, DoglegOptimizerImpl::ONE_STEP_PER_ITERATION, *bn, graph_, state_.values, ordering, state_.error, dlVerbose);
 
   } else {
     throw runtime_error("Optimization parameter is invalid: DoglegParams::elimination");
@@ -66,7 +66,7 @@ void DoglegOptimizer::iterate(void) const {
   // Create new state with new values and new error
   state_.values = state_.values.retract(result.dx_d, ordering);
   state_.error = result.f_error;
-  state_.delta = result.delta;
+  state_.Delta = result.Delta;
   ++state_.iterations;
 }
 
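Note: the state_.delta to state_.Delta rename above refers to the trust-region radius. As background, the step that DoglegOptimizerImpl::Iterate selects blends the steepest-descent (Cauchy) step with the Gauss-Newton step, truncated to length Delta. Below is a self-contained sketch of that classical selection rule; the vector type and helpers are illustrative stand-ins, not GTSAM API.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    typedef std::vector<double> Vec;  // illustrative, not a GTSAM type

    double dot(const Vec& a, const Vec& b) {
      double s = 0.0;
      for (std::size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
      return s;
    }
    double norm(const Vec& a) { return std::sqrt(dot(a, a)); }

    // Classical dogleg step: given the steepest-descent step dxSd, the
    // Gauss-Newton step dxGn, and trust-region radius Delta, return the
    // blended step of length at most Delta.
    Vec doglegStep(double Delta, const Vec& dxSd, const Vec& dxGn) {
      if (norm(dxGn) <= Delta) return dxGn;       // full GN step fits in the region
      if (norm(dxSd) >= Delta) {                  // even the SD step is too long: truncate it
        Vec step(dxSd);
        const double scale = Delta / norm(dxSd);
        for (std::size_t i = 0; i < step.size(); ++i) step[i] *= scale;
        return step;
      }
      // Otherwise walk from dxSd toward dxGn until hitting the boundary:
      // solve ||dxSd + tau*(dxGn - dxSd)||^2 = Delta^2 for tau in [0,1].
      Vec diff(dxGn);
      for (std::size_t i = 0; i < diff.size(); ++i) diff[i] -= dxSd[i];
      const double a = dot(diff, diff);
      const double b = 2.0 * dot(dxSd, diff);
      const double c = dot(dxSd, dxSd) - Delta * Delta;   // negative here, so a real root exists
      const double tau = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
      Vec step(dxSd);
      for (std::size_t i = 0; i < step.size(); ++i) step[i] += tau * diff[i];
      return step;
    }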
DoglegOptimizer.h
@@ -22,6 +22,8 @@
 namespace gtsam {
 
+class DoglegOptimizer;
+
 /** Parameters for Dogleg optimization. Note that this parameters
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms. This class also contains
@@ -53,17 +55,26 @@ public:
 /**
  * State for DoglegOptimizer
  */
-class DoglegState : public SuccessiveLinearizationState {
+class DoglegState : public NonlinearOptimizerState {
 public:
 
-  double delta;
+  double Delta;
 
+  DoglegState() {}
+
+  virtual ~DoglegState() {}
+
+protected:
+  DoglegState(const NonlinearFactorGraph& graph, const Values& values, const DoglegParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations), Delta(params.deltaInitial) {}
+
+  friend class DoglegOptimizer;
 };
 
 /**
  * This class performs Dogleg nonlinear optimization
  */
-class DoglegOptimizer : public SuccessiveLinearizationOptimizer {
+class DoglegOptimizer : public NonlinearOptimizer {
 
 public:
@@ -82,7 +93,7 @@ public:
   */
   DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const DoglegParams& params = DoglegParams()) :
-          NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+          NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues, params_) {}
 
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters. For convenience this
@@ -93,8 +104,9 @@ public:
   * @param params The optimization parameters
   */
   DoglegOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
-      NonlinearOptimizer(graph), state_(graph, initialValues) {
-    *params_.ordering = ordering; }
+      NonlinearOptimizer(graph) {
+    *params_.ordering = ordering;
+    state_ = DoglegState(graph, initialValues, params_); }
 
   /// @}
 
@@ -108,7 +120,7 @@ public:
   * containing the updated variable assignments, which may be retrieved with
   * values().
   */
-  virtual void iterate() const;
+  virtual void iterate();
 
   /** Access the parameters */
   const DoglegParams& params() const { return params_; }
@@ -116,6 +128,9 @@ public:
   /** Access the last state */
   const DoglegState& state() const { return state_; }
 
+  /** Access the current trust region radius Delta */
+  double Delta() const { return state_.Delta; }
+
   /// @}
 
 protected:
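With the new accessor, a driving loop might look like the fragment below (inside some function body). Only the DoglegOptimizer API visible in this diff is assumed; the graph and initial values are placeholders populated by the application.

    #include <iostream>
    // Hypothetical driver fragment, not from the GTSAM sources:
    NonlinearFactorGraph graph;   // placeholder: factors added elsewhere
    Values initial;               // placeholder: initial estimates inserted elsewhere
    DoglegParams params;          // ordering defaulted via ensureHasOrdering
    DoglegOptimizer optimizer(graph, initial, params);
    optimizer.iterate();          // one dogleg step; mutates internal state (no longer const)
    std::cout << "trust region Delta: " << optimizer.Delta() << std::endl;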
@@ -131,7 +146,7 @@ protected:
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
   DoglegParams ensureHasOrdering(DoglegParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
-      params.ordering = graph.orderingCOLAMD(values);
+      params.ordering = *graph.orderingCOLAMD(values);
     return params;
   }
 };
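A note on ensureHasOrdering, which recurs verbatim for GaussNewtonParams and LevenbergMarquardtParams below: params is taken by value so the caller's copy is untouched, and the new leading * dereferences the pointer-like handle that graph.orderingCOLAMD returns so a plain Ordering lands in the optional ordering slot. The shape of the pattern, with illustrative names rather than the GTSAM ones:

    // Params withDefaultOrdering(Params params) {        // by value: local copy
    //   if(!params.ordering)                             // optional<> emptiness test
    //     params.ordering = *computeDefaultOrdering();   // deref the handle into the optional
    //   return params;                                   // copy with the default filled in
    // }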
GaussNewtonOptimizer.cpp
@@ -25,7 +25,7 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-void GaussNewtonOptimizer::iterate() const {
+void GaussNewtonOptimizer::iterate() {
 
   const NonlinearOptimizerState& current = state_;
 
@@ -54,7 +54,6 @@ void GaussNewtonOptimizer::iterate() const {
   if(params_.verbosity >= NonlinearOptimizerParams::DELTA) delta->print("delta");
 
   // Create new state with new values and new error
-  NonlinearOptimizerState newState;
   state_.values = current.values.retract(*delta, *params_.ordering);
   state_.error = graph_.error(state_.values);
   ++ state_.iterations;
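For reference, one Gauss-Newton step as performed conceptually above: linearize, solve for delta, retract. A self-contained numeric sketch using Eigen (which GTSAM builds on); the Jacobian and residual here are stand-ins, not GTSAM objects.

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Stand-in linearization: J * dx ~= -r for a tiny 3x2 least-squares problem.
      Eigen::MatrixXd J(3, 2);
      J << 1, 0,
           0, 1,
           1, 1;
      Eigen::VectorXd r(3);
      r << 0.5, -0.2, 0.1;

      // One Gauss-Newton step: solve the normal equations J^T J dx = -J^T r.
      Eigen::VectorXd dx = (J.transpose() * J).ldlt().solve(-J.transpose() * r);
      std::cout << "dx = " << dx.transpose() << std::endl;  // the update to retract
      return 0;
    }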
GaussNewtonOptimizer.h
@@ -22,12 +22,22 @@
 namespace gtsam {
 
+class GaussNewtonOptimizer;
+
 /** Parameters for Gauss-Newton optimization, inherits from
  * NonlinearOptimizationParams.
  */
 class GaussNewtonParams : public SuccessiveLinearizationParams {
 };
 
+class GaussNewtonState : public NonlinearOptimizerState {
+protected:
+  GaussNewtonState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations) {}
+
+  friend class GaussNewtonOptimizer;
+};
+
 /**
  * This class performs Gauss-Newton nonlinear optimization
  */
@@ -48,7 +58,7 @@ public:
   */
   GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const GaussNewtonParams& params = GaussNewtonParams()) :
-          NonlinearOptimizer(graph), params_(ensureHasOrdering(params)), state_(graph, initialValues) {}
+          NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues) {}
 
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters. For convenience this
@@ -74,20 +84,20 @@ public:
   * containing the updated variable assignments, which may be retrieved with
   * values().
   */
-  virtual void iterate() const;
+  virtual void iterate();
 
   /** Access the parameters */
   const GaussNewtonParams& params() const { return params_; }
 
   /** Access the last state */
-  const NonlinearOptimizerState& state() const { return state_; }
+  const GaussNewtonState& state() const { return state_; }
 
   /// @}
 
 protected:
 
   GaussNewtonParams params_;
-  NonlinearOptimizerState state_;
+  GaussNewtonState state_;
 
   /** Access the parameters (base class version) */
   virtual const NonlinearOptimizerParams& _params() const { return params_; }
@@ -98,7 +108,7 @@ protected:
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
   GaussNewtonParams ensureHasOrdering(GaussNewtonParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
-      params.ordering = graph.orderingCOLAMD(values);
+      params.ordering = *graph.orderingCOLAMD(values);
     return params;
   }
 
LevenbergMarquardtOptimizer.cpp
@@ -27,10 +27,10 @@ using namespace std;
 namespace gtsam {
 
 /* ************************************************************************* */
-void LevenbergMarquardtOptimizer::iterate() const {
+void LevenbergMarquardtOptimizer::iterate() {
 
   // Linearize graph
-  GaussianFactorGraph::shared_ptr linear = graph_->linearize(state_.values, *params_.ordering);
+  GaussianFactorGraph::shared_ptr linear = graph_.linearize(state_.values, *params_.ordering);
 
   // Check whether to use QR
   bool useQR;
@@ -48,13 +48,13 @@ void LevenbergMarquardtOptimizer::iterate() const {
   // Keep increasing lambda until we make progress
   while(true) {
     if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
-      cout << "trying lambda = " << lambda << endl;
+      cout << "trying lambda = " << state_.lambda << endl;
 
     // Add prior-factors
     // TODO: replace this dampening with a backsubstitution approach
     GaussianFactorGraph dampedSystem(*linear);
     {
-      double sigma = 1.0 / sqrt(lambda);
+      double sigma = 1.0 / sqrt(state_.lambda);
       dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
       // for each of the variables, add a prior
       for(Index j=0; j<dimensions_.size(); ++j) {
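The damping block above is the standard Levenberg-Marquardt modification: a prior on each variable's update with sigma = 1/sqrt(lambda) carries precision 1/sigma^2 = lambda, so the Gauss-Newton system J^T J dx = -J^T r becomes (J^T J + lambda I) dx = -J^T r. A small self-contained check of that identity with Eigen (stand-in matrices, not GTSAM objects):

    #include <Eigen/Dense>
    #include <cassert>
    #include <cmath>

    int main() {
      Eigen::MatrixXd J(3, 2);
      J << 1, 0, 0, 1, 1, 1;
      const double lambda = 0.1;

      // Damping via appended "prior" rows with sigma = 1/sqrt(lambda): each
      // extra row is (1/sigma)*e_j, so Jaug^T Jaug = J^T J + lambda * I.
      const double sigma = 1.0 / std::sqrt(lambda);
      Eigen::MatrixXd Jaug(5, 2);
      Jaug << J, Eigen::MatrixXd::Identity(2, 2) / sigma;

      Eigen::MatrixXd damped = J.transpose() * J + lambda * Eigen::MatrixXd::Identity(2, 2);
      assert((Jaug.transpose() * Jaug - damped).norm() < 1e-12);  // identity holds
      return 0;
    }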
@@ -114,7 +114,7 @@ void LevenbergMarquardtOptimizer::iterate() const {
     // Either we're not cautious, or the same lambda was worse than the current error.
     // The more adventurous lambda was worse too, so make lambda more conservative
     // and keep the same values.
-    if(lambda >= params_.lambdaUpperBound) {
+    if(state_.lambda >= params_.lambdaUpperBound) {
       if(nloVerbosity >= NonlinearOptimizerParams::ERROR)
         cout << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda" << endl;
       break;
LevenbergMarquardtOptimizer.h
@@ -22,6 +22,8 @@
 namespace gtsam {
 
+class LevenbergMarquardtOptimizer;
+
 /** Parameters for Levenberg-Marquardt optimization. Note that this parameters
  * class inherits from NonlinearOptimizerParams, which specifies the parameters
  * common to all nonlinear optimization algorithms. This class also contains
@@ -66,6 +68,15 @@ public:
 
   double lambda;
 
+  LevenbergMarquardtState() {}
+
+  virtual ~LevenbergMarquardtState() {}
+
+protected:
+  LevenbergMarquardtState(const NonlinearFactorGraph& graph, const Values& values, const LevenbergMarquardtParams& params, unsigned int iterations = 0) :
+    NonlinearOptimizerState(graph, values, iterations), lambda(params.lambdaInitial) {}
+
+  friend class LevenbergMarquardtOptimizer;
 };
 
 /**
@@ -90,8 +101,8 @@ public:
   */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
       const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
-          NonlinearOptimizer(graph), params_(ensureHasOrdering(params)),
-          state_(graph, initialValues), dimensions_(initialValues.dims(*params_.ordering)) {}
+          NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)),
+          state_(graph, initialValues, params_), dimensions_(initialValues.dims(*params_.ordering)) {}
 
   /** Standard constructor, requires a nonlinear factor graph, initial
    * variable assignments, and optimization parameters. For convenience this
@@ -102,8 +113,12 @@ public:
   * @param params The optimization parameters
   */
   LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
-      NonlinearOptimizer(graph), state_(graph, initialValues), dimensions_(initialValues.dims(ordering)) {
-    *params_.ordering = ordering; }
+      NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) {
+    *params_.ordering = ordering;
+    state_ = LevenbergMarquardtState(graph, initialValues, params_); }
+
+  /** Access the current damping value */
+  double lambda() const { return state_.lambda; }
 
   /// @}
 
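Why this constructor (and the Dogleg analogue above) now assigns state_ in the body rather than the initializer list: C++ initializes members in declaration order, and here the state depends on params_.ordering being set first. The underlying pitfall in miniature (illustrative struct, not from GTSAM):

    struct Example {
      int a;
      int b;
      // Dangerous: mem-initializer order is ignored; a is initialized first
      // (declaration order) and reads b before b has been initialized.
      Example() : b(1), a(b + 1) {}
    };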
@@ -117,7 +132,7 @@ public:
   * containing the updated variable assignments, which may be retrieved with
   * values().
   */
-  virtual void iterate() const;
+  virtual void iterate();
 
   /** Access the parameters */
   const LevenbergMarquardtParams& params() const { return params_; }
@@ -142,7 +157,7 @@ protected:
   /** Internal function for computing a COLAMD ordering if no ordering is specified */
   LevenbergMarquardtParams ensureHasOrdering(LevenbergMarquardtParams params, const NonlinearFactorGraph& graph, const Values& values) const {
     if(!params.ordering)
-      params.ordering = graph.orderingCOLAMD(values);
+      params.ordering = *graph.orderingCOLAMD(values);
     return params;
   }
 };
NonlinearOptimizer.cpp
@@ -27,7 +27,7 @@ namespace gtsam {
 /* ************************************************************************* */
 void NonlinearOptimizer::defaultOptimize() {
 
-  const NonlinearOptimizerParams& params = this->params();
+  const NonlinearOptimizerParams& params = this->_params();
   double currentError = this->error();
 
   // check if we're already close enough
NonlinearOptimizer.h
@@ -22,6 +22,79 @@
 namespace gtsam {
 
+class NonlinearOptimizer;
+
+/** The common parameters for Nonlinear optimizers. Most optimizers
+ * deriving from NonlinearOptimizer also subclass the parameters.
+ */
+class NonlinearOptimizerParams {
+public:
+  /** See NonlinearOptimizerParams::verbosity */
+  enum Verbosity {
+    SILENT,
+    ERROR,
+    VALUES,
+    DELTA,
+    LINEAR
+  };
+
+  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
+  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
+  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
+  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
+  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)
+
+  NonlinearOptimizerParams() :
+    maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5),
+    errorTol(0.0), verbosity(SILENT) {}
+
+  virtual void print(const std::string& str = "") const {
+    std::cout << str << "\n";
+    std::cout << "relative decrease threshold: " << relativeErrorTol << "\n";
+    std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
+    std::cout << "      total error threshold: " << errorTol << "\n";
+    std::cout << "         maximum iterations: " << maxIterations << "\n";
+    std::cout << "            verbosity level: " << verbosity << std::endl;
+  }
+
+  virtual ~NonlinearOptimizerParams() {}
+};
+
+
+/**
+ * Base class for a nonlinear optimization state, including the current estimate
+ * of the variable values, error, and number of iterations. Optimizers derived
+ * from NonlinearOptimizer usually also define a derived state class containing
+ * additional state specific to the algorithm (for example, Dogleg state
+ * contains the current trust region radius).
+ */
+class NonlinearOptimizerState {
+public:
+
+  /** The current estimate of the variable values. */
+  Values values;
+
+  /** The factor graph error on the current values. */
+  double error;
+
+  /** The number of optimization iterations performed. */
+  unsigned int iterations;
+
+  NonlinearOptimizerState() {}
+
+  /** Virtual destructor */
+  virtual ~NonlinearOptimizerState() {}
+
+protected:
+  NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
+    values(values), error(graph.error(values)), iterations(iterations) {}
+
+  NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) :
+    values(values), error(error), iterations(iterations) {}
+
+  friend class NonlinearOptimizer;
+};
+
 /**
  * This is the abstract interface for classes that can optimize for the
  * maximum-likelihood estimate of a NonlinearFactorGraph.
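With the parameters class now declared ahead of the optimizer interface, tuning is a matter of setting the public fields declared above, for example:

    NonlinearOptimizerParams params;
    params.maxIterations = 50;                           // stop after 50 iterations
    params.relativeErrorTol = 1e-6;                      // tighter relative-decrease test
    params.verbosity = NonlinearOptimizerParams::ERROR;  // report error as it runs
    params.print("tuned settings");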
@@ -113,7 +186,7 @@ public:
   * process, you may call iterate() and check_convergence() yourself, and if
   * needed modify the optimization state between iterations.
   */
-  virtual const Values& optimize() const { return defaultOptimize(); }
+  virtual const Values& optimize() { defaultOptimize(); return values(); }
 
   double error() const { return _state().error; }
 
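Note the semantic shift in optimize(): it previously forwarded to a const defaultOptimize() that produced the result directly; it now runs defaultOptimize() mutating the optimizer in place and returns the stored values. A call-site sketch, with optimizer standing for any concrete subclass instance:

    const Values& solution = optimizer.optimize();  // iterates in place, then returns values()
    double finalError = optimizer.error();          // error at the returned solution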
@@ -133,7 +206,7 @@ public:
   * containing the updated variable assignments, which may be retrieved with
   * values().
   */
-  virtual void iterate() const = 0;
+  virtual void iterate() = 0;
 
   /// @}
 
@@ -155,73 +228,6 @@ protected:
 
 };
 
-
-/** The common parameters for Nonlinear optimizers. Most optimizers
- * deriving from NonlinearOptimizer also subclass the parameters.
- */
-class NonlinearOptimizerParams {
-public:
-  /** See NonlinearOptimizerParams::verbosity */
-  enum Verbosity {
-    SILENT,
-    ERROR,
-    VALUES,
-    DELTA,
-    LINEAR
-  };
-
-  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
-  double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
-  double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
-  double errorTol; ///< The maximum total error to stop iterating (default 0.0)
-  Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT)
-
-  NonlinearOptimizerParams() :
-    maxIterations(100.0), relativeErrorTol(1e-5), absoluteErrorTol(1e-5),
-    errorTol(0.0), verbosity(SILENT) {}
-
-  virtual void print(const std::string& str = "") const {
-    std::cout << str << "\n";
-    std::cout << "relative decrease threshold: " << relativeErrorTol << "\n";
-    std::cout << "absolute decrease threshold: " << absoluteErrorTol << "\n";
-    std::cout << "      total error threshold: " << errorTol << "\n";
-    std::cout << "         maximum iterations: " << maxIterations << "\n";
-    std::cout << "            verbosity level: " << verbosity << std::endl;
-  }
-
-  virtual ~NonlinearOptimizerParams() {}
-};
-
-
-/**
- * Base class for a nonlinear optimization state, including the current estimate
- * of the variable values, error, and number of iterations. Optimizers derived
- * from NonlinearOptimizer usually also define a derived state class containing
- * additional state specific to the algorithm (for example, Dogleg state
- * contains the current trust region radius).
- */
-class NonlinearOptimizerState {
-public:
-
-  /** The current estimate of the variable values. */
-  Values values;
-
-  /** The factor graph error on the current values. */
-  double error;
-
-  /** The number of optimization iterations performed. */
-  unsigned int iterations;
-
-  NonlinearOptimizerState(const NonlinearFactorGraph& graph, const Values& values, unsigned int iterations = 0) :
-    values(values), error(graph.error(values)), iterations(iterations) {}
-
-  NonlinearOptimizerState(const Values& values, double error, unsigned int iterations) :
-    values(values), error(error), iterations(iterations) {}
-
-  /** Virtual destructor */
-  virtual ~NonlinearOptimizerState() {}
-};
-
 /** Check whether the relative error decrease is less than relativeErrorTreshold,
  * the absolute error decrease is less than absoluteErrorTreshold, <emph>or</emph>
  * the error itself is less than errorThreshold.
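The comment above describes the convergence test. A self-contained sketch of that rule as documented (not necessarily the exact GTSAM signature; the misspelled "Treshold" names follow the comment): converged when the relative decrease, the absolute decrease, or the error itself falls below its threshold.

    // Sketch of the documented rule; parameter names mirror the comment above.
    bool checkConvergenceSketch(double relativeErrorTreshold, double absoluteErrorTreshold,
                                double errorThreshold, double currentError, double newError) {
      if(newError <= errorThreshold) return true;         // error itself is small enough
      const double absoluteDecrease = currentError - newError;
      const double relativeDecrease = absoluteDecrease / currentError;
      return absoluteDecrease <= absoluteErrorTreshold    // decrease stalled in absolute terms
          || relativeDecrease <= relativeErrorTreshold;   // ...or in relative terms
    }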
pose2SLAM.h
@@ -97,7 +97,7 @@ namespace pose2SLAM {
 
     /// Optimize
     Values optimize(const Values& initialEstimate) {
-      return LevenbergMarquardtOptimizer(*this).optimized(initialEstimate);
+      return LevenbergMarquardtOptimizer(*this, initialEstimate).optimize();
     }
   };
 
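The one-line wrapper now hands the initial estimate to the optimizer's constructor and calls the mutating optimize() instead of the removed optimized() helper. A hypothetical call site, assuming this method lives on the pose2SLAM graph type:

    // pose2SLAM::Graph graph;      // hypothetical type name; factors added elsewhere
    // Values initial;              // initial pose estimates, filled elsewhere
    // Values result = graph.optimize(initial);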