correct formatting

release/4.3a0
lcarlone 2020-11-27 22:54:51 -05:00
parent c4644a0d61
commit 7699f04820
1 changed file with 155 additions and 120 deletions


@@ -35,7 +35,7 @@ using symbol_shorthand::L;
static double tol = 1e-7;
/* ************************************************************************* */
template<class BaseOptimizerParameters>
class GncParams {
public:
  /** Verbosity levels */
@@ -50,11 +50,14 @@ public:
  using BaseOptimizer = GaussNewtonOptimizer; // BaseOptimizerParameters::OptimizerType;
  GncParams(const BaseOptimizerParameters& baseOptimizerParams) :
      baseOptimizerParams(baseOptimizerParams) {
  }
  // default constructor
  GncParams() :
      baseOptimizerParams() {
  }
  BaseOptimizerParameters baseOptimizerParams;
  /// any other specific GNC parameters:
@@ -62,29 +65,36 @@ public:
  size_t maxIterations = 100; /* maximum number of iterations*/
  double barcSq = 1.0; /* a factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance*/
  double muStep = 1.4; /* multiplicative factor to reduce/increase the mu in gnc */
  VerbosityGNC verbosityGNC = SILENT; /* verbosity level */
  std::vector<size_t> knownInliers = std::vector<size_t>(); /* slots in the factor graph corresponding to measurements that we know are inliers */
  void setLossType(const RobustLossType type) {
    lossType = type;
  }
  void setMaxIterations(const size_t maxIter) {
    std::cout
        << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
        << std::endl;
    maxIterations = maxIter;
  }
  void setInlierThreshold(const double inth) {
    barcSq = inth;
  }
  void setMuStep(const double step) {
    muStep = step;
  }
  void setVerbosityGNC(const VerbosityGNC verbosity) {
    verbosityGNC = verbosity;
  }
  void setKnownInliers(const std::vector<size_t> knownIn) {
    for (size_t i = 0; i < knownIn.size(); i++)
      knownInliers.push_back(knownIn[i]);
  }
  /// equals
  bool equals(const GncParams& other, double tol = 1e-9) const {
    return baseOptimizerParams.equals(other.baseOptimizerParams)
        && lossType == other.lossType && maxIterations == other.maxIterations
        && std::fabs(barcSq - other.barcSq) <= tol
        && std::fabs(muStep - other.muStep) <= tol
        && verbosityGNC == other.verbosityGNC
@@ -94,17 +104,18 @@ public:
  /// print function
  void print(const std::string& str) const {
    std::cout << str << "\n";
    switch (lossType) {
      case GM:
        std::cout << "lossType: Geman McClure" << "\n";
        break;
      default:
        throw std::runtime_error("GncParams::print: unknown loss type.");
    }
    std::cout << "maxIterations: " << maxIterations << "\n";
    std::cout << "barcSq: " << barcSq << "\n";
    std::cout << "muStep: " << muStep << "\n";
    std::cout << "verbosityGNC: " << verbosityGNC << "\n";
    for (size_t i = 0; i < knownInliers.size(); i++)
      std::cout << "knownInliers: " << knownInliers[i] << "\n";
    baseOptimizerParams.print(str);
  }
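
Note: for reference, a minimal configuration sketch using the setters above; the GaussNewtonParams base and the specific values are illustrative, not prescribed by this commit.

  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  gncParams.setLossType(GncParams<GaussNewtonParams>::RobustLossType::GM);
  gncParams.setInlierThreshold(1.0); // barcSq: inlier if factor.error() < 1.0
  gncParams.setMuStep(1.4); // multiplicative schedule for mu
  gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::VerbosityGNC::SUMMARY);
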
@@ -123,21 +134,24 @@ private:
  Vector weights_; // this could be a local variable in optimize, but it is useful to make it accessible from outside
public:
  GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
      const GncParameters& params = GncParameters()) :
      state_(initialValues), params_(params) {
    // make sure all noiseModels are Gaussian or convert to Gaussian
    nfg_.resize(graph.size());
    for (size_t i = 0; i < graph.size(); i++) {
      if (graph[i]) {
        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
            NoiseModelFactor>(graph[i]);
        noiseModel::Robust::shared_ptr robust = boost::dynamic_pointer_cast<
            noiseModel::Robust>(factor->noiseModel());
        if (robust) { // if the factor has a robust loss, we have to change it:
          SharedNoiseModel gaussianNoise = robust->noise();
          NoiseModelFactor::shared_ptr gaussianFactor =
              factor->cloneWithNewNoiseModel(gaussianNoise);
          nfg_[i] = gaussianFactor;
        } else { // else we directly push it back
          nfg_[i] = factor;
        }
      }
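
Note: the loop above strips noiseModel::Robust wrappers down to their underlying Gaussian model via robust->noise(). A hedged sketch of the kind of factor it unwraps (the Point2 prior and the parameter values are assumptions for illustration):

  // A factor wrapped in a Geman McClure robust loss (illustrative only):
  SharedNoiseModel gaussian = noiseModel::Isotropic::Sigma(2, 0.1);
  SharedNoiseModel robust = noiseModel::Robust::Create(
      noiseModel::mEstimator::GemanMcClure::Create(1.0), gaussian);
  NonlinearFactorGraph graph;
  graph.add(PriorFactor<Point2>(X(1), Point2(0, 0), robust));
  // The constructor above would store this factor in nfg_ with `gaussian`
  // in place of `robust`.
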
@@ -145,22 +159,30 @@ public:
  }
  /// getter functions
  NonlinearFactorGraph getFactors() const {
    return NonlinearFactorGraph(nfg_);
  }
  Values getState() const {
    return Values(state_);
  }
  GncParameters getParams() const {
    return GncParameters(params_);
  }
  Vector getWeights() const {
    return weights_;
  }
  /// implement GNC main loop, including graduating nonconvexity with mu
  Values optimize() {
    // start by assuming all measurements are inliers
    weights_ = Vector::Ones(nfg_.size());
    GaussNewtonOptimizer baseOptimizer(nfg_, state_);
    Values result = baseOptimizer.optimize();
    double mu = initializeMu();
    for (size_t iter = 0; iter < params_.maxIterations; iter++) {
      // display info
      if (params_.verbosityGNC >= GncParameters::VerbosityGNC::VALUES) {
        result.print("result\n");
        std::cout << "mu: " << mu << std::endl;
        std::cout << "weights: " << weights_ << std::endl;
@@ -174,9 +196,9 @@ public:
      result = baseOptimizer_iter.optimize();
      // stopping condition
      if (checkMuConvergence(mu)) {
        // display info
        if (params_.verbosityGNC >= GncParameters::VerbosityGNC::SUMMARY) {
          std::cout << "final iterations: " << iter << std::endl;
          std::cout << "final mu: " << mu << std::endl;
          std::cout << "final weights: " << weights_ << std::endl;
@@ -195,14 +217,14 @@ public:
    // compute largest error across all factors
    double rmax_sq = 0.0;
    for (size_t i = 0; i < nfg_.size(); i++) {
      if (nfg_[i]) {
        rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_));
      }
    }
    // set initial mu
    switch (params_.lossType) {
      case GncParameters::GM:
        return 2 * rmax_sq / params_.barcSq; // initial mu
      default:
        throw std::runtime_error(
            "GncOptimizer::initializeMu: called with unknown loss type.");
@@ -211,9 +233,9 @@ public:
  /// update the gnc parameter mu to gradually increase nonconvexity
  double updateMu(const double mu) const {
    switch (params_.lossType) {
      case GncParameters::GM:
        return std::max(1.0, mu / params_.muStep); // reduce mu, but saturate at 1
      default:
        throw std::runtime_error(
            "GncOptimizer::updateMu: called with unknown loss type.");
@@ -222,7 +244,7 @@ public:
  /// check if we have reached the value of mu for which the surrogate loss matches the original loss
  bool checkMuConvergence(const double mu) const {
    switch (params_.lossType) {
      case GncParameters::GM:
        return std::fabs(mu - 1.0) < 1e-9; // mu=1 recovers the original GM function
      default:
@@ -237,16 +259,20 @@ public:
    NonlinearFactorGraph newGraph;
    newGraph.resize(nfg_.size());
    for (size_t i = 0; i < nfg_.size(); i++) {
      if (nfg_[i]) {
        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
            NoiseModelFactor>(nfg_[i]);
        noiseModel::Gaussian::shared_ptr noiseModel =
            boost::dynamic_pointer_cast<noiseModel::Gaussian>(
                factor->noiseModel());
        if (noiseModel) {
          Matrix newInfo = weights[i] * noiseModel->information();
          SharedNoiseModel newNoiseModel = noiseModel::Gaussian::Information(
              newInfo);
          newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
        } else {
          throw std::runtime_error(
              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
        }
      }
    }
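
Note: the reweighting above scales each factor's information matrix, which is equivalent to inflating its standard deviation:

  $$\Sigma_k^{-1} \leftarrow w_k\, \Sigma_k^{-1} \quad\Longleftrightarrow\quad \sigma_k \leftarrow \sigma_k / \sqrt{w_k}$$

e.g. $w_k = 10^{-4}$ turns $\sigma = 0.1$ into $\sigma = 10$, which is exactly the case checked in the makeWeightedGraph test below.
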
@@ -254,24 +280,27 @@ public:
  }
  /// calculate gnc weights
  Vector calculateWeights(const Values currentEstimate, const double mu) {
    Vector weights = Vector::Ones(nfg_.size());
    // do not update the weights that the user has decided are known inliers
    std::vector<size_t> allWeights;
    for (size_t k = 0; k < nfg_.size(); k++) {
      allWeights.push_back(k);
    }
    std::vector<size_t> unknownWeights;
    std::set_difference(allWeights.begin(), allWeights.end(),
        params_.knownInliers.begin(), params_.knownInliers.end(),
        std::inserter(unknownWeights, unknownWeights.begin()));
    // update weights of known inlier/outlier measurements
    switch (params_.lossType) {
      case GncParameters::GM: // use eq (12) in GNC paper
        for (size_t k : unknownWeights) {
          if (nfg_[k]) {
            double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
            weights[k] = std::pow(
                (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
          }
        }
        return weights;
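
Note: written out, the GM weight update above (eq. (12) in the GNC paper) is

  $$w_k = \left(\frac{\mu\,\bar{c}^2}{r_k^2 + \mu\,\bar{c}^2}\right)^2$$

with $r_k^2$ the whitened squared residual u2_k; inliers ($r_k^2 \approx 0$) keep $w_k \approx 1$, while large residuals are driven toward 0.
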
@@ -284,7 +313,6 @@ public:
/* ************************************************************************* */
TEST(GncOptimizer, gncParamsConstructor) {
  //check params are correctly parsed
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
@@ -296,7 +324,7 @@ TEST(GncOptimizer, gncParamsConstructor) {
  // and check params become different if we change lmParams
  lmParams.setVerbosity("DELTA");
  CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));
  // and same for GN
  GaussNewtonParams gnParams;
@@ -310,7 +338,7 @@ TEST(GncOptimizer, gncParamsConstructor) {
  // change something at the gncParams level
  GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
  gncParams2c.setLossType(GncParams<GaussNewtonParams>::RobustLossType::TLS);
  CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
}
/* ************************************************************************* */
@@ -324,7 +352,8 @@ TEST(GncOptimizer, gncConstructor) {
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
      gncParams);
  CHECK(gnc.getFactors().equals(fg));
  CHECK(gnc.getState().equals(initial));
@@ -333,7 +362,6 @@ TEST(GncOptimizer, gncConstructor) {
/* ************************************************************************* */
TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
  // same graph with robust noise model
  auto fg_robust = example::sharedRobustFactorGraphWithOutliers();
@@ -344,15 +372,15 @@ TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
      initial, gncParams);
  // make sure that when parsing the graph is transformed into one without robust loss
  CHECK(fg.equals(gnc.getFactors()));
}
/* ************************************************************************* */
TEST(GncOptimizer, initializeMu) {
  auto fg = example::createReallyNonlinearFactorGraph();
  Point2 p0(3, 3);
@@ -361,8 +389,10 @@ TEST(GncOptimizer, initializeMu) {
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  gncParams.setLossType(
      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
      gncParams);
  EXPECT_DOUBLES_EQUAL(gnc.initializeMu(), 2 * 198.999, 1e-3); // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq (barcSq=1 in this example)
}
@@ -377,8 +407,10 @@ TEST(GncOptimizer, updateMu) {
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  gncParams.setLossType(
      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
      gncParams);
  double mu = 5.0;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
@@ -399,8 +431,10 @@ TEST(GncOptimizer, checkMuConvergence) {
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  gncParams.setLossType(
      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
      gncParams);
  double mu = 1.0;
  CHECK(gnc.checkMuConvergence(mu));
@@ -408,67 +442,69 @@ TEST(GncOptimizer, checkMuConvergence) {
}
/* ************************************************************************* */
TEST(GncOptimizer, calculateWeights) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
  Point2 p0(0, 0);
  Values initial;
  initial.insert(X(1), p0);
  // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 * 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
  Vector weights_expected = Vector::Zero(4);
  weights_expected[0] = 1.0; // zero error
  weights_expected[1] = 1.0; // zero error
  weights_expected[2] = 1.0; // zero error
  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50
  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  double mu = 1.0;
  Vector weights_actual = gnc.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));
  mu = 2.0;
  double barcSq = 5.0;
  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
  gncParams.setInlierThreshold(barcSq);
  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
      gncParams);
  weights_actual = gnc2.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));
}
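
Note: as a quick check of the expected values in this test, the GM formula gives

  $$w_3 = \left(\frac{1 \cdot 1}{50 + 1 \cdot 1}\right)^2 = \frac{1}{2601} \approx 3.8\mathrm{e}{-4}, \qquad w_3 = \left(\frac{2 \cdot 5}{50 + 2 \cdot 5}\right)^2 = \frac{1}{36} \approx 0.028$$

matching weights_expected[3] for (mu = 1, barcSq = 1) and (mu = 2, barcSq = 5) respectively.
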
/* ************************************************************************* */
TEST(GncOptimizer, makeWeightedGraph) {
  // create original factor
  double sigma1 = 0.1;
  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
      sigma1);
  // create expected
  double sigma2 = 10;
  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
      sigma2);
  // create weights
  Vector weights = Vector::Ones(1); // original info:1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10-4
  weights[0] = 1e-4;
  // create actual
  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
      gncParams);
  NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);
  // check it's all good
  CHECK(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST(GncOptimizer, optimizeSimple) {
  auto fg = example::createReallyNonlinearFactorGraph();
  Point2 p0(3, 3);
@@ -477,7 +513,8 @@ TEST(GncOptimizer, optimizeSimple) {
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
      gncParams);
  Values actual = gnc.optimize();
  DOUBLES_EQUAL(0, fg.error(actual), tol);
@@ -485,7 +522,6 @@ TEST(GncOptimizer, optimizeSimple) {
/* ************************************************************************* */
TEST(GncOptimizer, optimize) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
  Point2 p0(1, 0);
@@ -497,26 +533,25 @@ TEST(GncOptimizer, optimize) {
  GaussNewtonOptimizer gn(fg, initial, gnParams);
  Values gn_results = gn.optimize();
  // converges to incorrect point due to lack of robustness to an outlier, ideal solution is Point2(0,0)
  CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
  // try with robust loss function and standard GN
  auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with factors wrapped in Geman McClure losses
  GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
  Values gn2_results = gn2.optimize();
  // converges to incorrect point, this time due to the nonconvexity of the loss
  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
  // .. but graduated nonconvexity ensures both robustness and convergence in the face of nonconvexity
  GncParams<GaussNewtonParams> gncParams(gnParams);
  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::VerbosityGNC::SUMMARY);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  Values gnc_result = gnc.optimize();
  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
}
/* ************************************************************************* */
TEST(GncOptimizer, optimizeWithKnownInliers) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
  Point2 p0(1, 0);
@@ -535,7 +570,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  Values gnc_result = gnc.optimize();
  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
  // check weights were actually fixed:
  Vector finalWeights = gnc.getWeights();