remove a custom constructor for LMParams, update matlab interface and tests accordingly
parent d89a9e086c
commit e437084349
gtsam.h
@@ -1011,7 +1011,6 @@ class Marginals {
 #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 class LevenbergMarquardtParams {
   LevenbergMarquardtParams();
-  LevenbergMarquardtParams(double initial, double factor, double bound, size_t verbose);
   void print(string s) const;

   double getMaxIterations() const;
@@ -46,13 +46,9 @@ public:
   double lambdaUpperBound; ///< The maximum lambda to try before assuming the optimization has failed (default: 1e5)
   VerbosityLM verbosityLM; ///< The verbosity level for Levenberg-Marquardt (default: SILENT), see also NonlinearOptimizerParams::verbosity

-  LevenbergMarquardtParams() :
-    lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM(SILENT) {}
-
-  LevenbergMarquardtParams(double initial, double factor, double bound, size_t verbose) :
-    lambdaInitial(initial), lambdaFactor(factor), lambdaUpperBound(bound), verbosityLM(VerbosityLM(verbose)) {}
+  LevenbergMarquardtParams() : lambdaInitial(1e-5), lambdaFactor(10.0), lambdaUpperBound(1e5), verbosityLM(SILENT) {}

   virtual ~LevenbergMarquardtParams() {}

   virtual void print(const std::string& str = "") const;

   inline double getlambdaInitial() const { return lambdaInitial; }
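With the four-argument constructor gone, non-default settings are obtained by default-constructing the parameters and overwriting the fields named in the initializer list above. A minimal C++ sketch of that pattern follows; it assumes those fields are public (the hunk sits under public:) and that the VerbosityLM enumerator TRYLAMBDA corresponds to the 'trylambda' string used in the MATLAB scripts below:

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

int main() {
  // The remaining constructor supplies the defaults shown above:
  // lambdaInitial = 1e-5, lambdaFactor = 10.0, lambdaUpperBound = 1e5, verbosityLM = SILENT.
  gtsam::LevenbergMarquardtParams params;

  // Deviate from the defaults by assigning the (assumed public) members directly.
  params.lambdaInitial = 1.0;
  params.lambdaUpperBound = 1e6;
  params.verbosityLM = gtsam::LevenbergMarquardtParams::TRYLAMBDA;  // assumed enumerator name

  params.print("tuned LM parameters");
  return 0;
}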
@@ -63,7 +63,7 @@ initialEstimate.print(sprintf('\nInitial estimate:\n '));

 %% Fine grain optimization, allowing user to iterate step by step

-parameters = gtsamLevenbergMarquardtParams(1e-5, 1e-5, 0, 0);
+parameters = gtsamLevenbergMarquardtParams;
 parameters.setlambdaInitial(1.0);
 parameters.setVerbosityLM('trylambda');

@@ -48,7 +48,7 @@ for j=1:size(truth.points,2)
 end

 %% Optimization
-parameters = gtsamLevenbergMarquardtParams(1e-5, 1e-5, 0, 0);
+parameters = gtsamLevenbergMarquardtParams;
 optimizer = graph.optimizer(initialEstimate, parameters);
 for i=1:5
     optimizer.iterate();
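For comparison, here is a rough C++ equivalent of the fine-grain loop used in the MATLAB tests above, run on a one-factor toy problem. This is a sketch, not code from this commit: the LevenbergMarquardtOptimizer(graph, initialEstimate, parameters) constructor, iterate(), and values() are assumed from the C++ optimizer API, and the PriorFactor header path may differ between GTSAM versions:

#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/PriorFactor.h>

int main() {
  using namespace gtsam;

  // One-factor toy problem, just to have something to optimize.
  NonlinearFactorGraph graph;
  SharedNoiseModel model = noiseModel::Isotropic::Sigma(3, 0.1);
  graph.add(PriorFactor<Pose2>(1, Pose2(0.0, 0.0, 0.0), model));

  Values initialEstimate;
  initialEstimate.insert(1, Pose2(0.5, -0.3, 0.2));

  // Default-constructed parameters, tuned through the (assumed public) fields,
  // mirroring parameters.setlambdaInitial(1.0) in the MATLAB script.
  LevenbergMarquardtParams parameters;
  parameters.lambdaInitial = 1.0;

  // Fine-grain optimization: one Levenberg-Marquardt step per iterate() call.
  LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters);
  for (size_t i = 0; i < 5; ++i)
    optimizer.iterate();

  optimizer.values().print("estimate after 5 manual iterations");
  return 0;
}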