From 7699f04820cd94558a062666f7fdbb5b1502d3ba Mon Sep 17 00:00:00 2001
From: lcarlone
Date: Fri, 27 Nov 2020 22:54:51 -0500
Subject: [PATCH] correct formatting

---
 tests/testGncOptimizer.cpp | 275 +++++++++++++++++++++----------------
 1 file changed, 155 insertions(+), 120 deletions(-)

diff --git a/tests/testGncOptimizer.cpp b/tests/testGncOptimizer.cpp
index 2e7692bec..65abc2042 100644
--- a/tests/testGncOptimizer.cpp
+++ b/tests/testGncOptimizer.cpp
@@ -35,7 +35,7 @@ using symbol_shorthand::L;
 static double tol = 1e-7;
 
 /* ************************************************************************* */
-template <class BaseOptimizerParameters>
+template<class BaseOptimizerParameters>
 class GncParams {
 public:
   /** Verbosity levels */
@@ -50,11 +50,14 @@ public:
   using BaseOptimizer = GaussNewtonOptimizer; // BaseOptimizerParameters::OptimizerType;
 
-  GncParams(const BaseOptimizerParameters& baseOptimizerParams):
-      baseOptimizerParams(baseOptimizerParams) {}
+  GncParams(const BaseOptimizerParameters& baseOptimizerParams) :
+      baseOptimizerParams(baseOptimizerParams) {
+  }
 
   // default constructor
-  GncParams(): baseOptimizerParams() {}
+  GncParams() :
+      baseOptimizerParams() {
+  }
 
   BaseOptimizerParameters baseOptimizerParams;
   /// any other specific GNC parameters:
@@ -62,29 +65,36 @@ public:
   size_t maxIterations = 100; /* maximum number of iterations*/
   double barcSq = 1.0; /* a factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance*/
   double muStep = 1.4; /* multiplicative factor to reduce/increase the mu in gnc */
-  VerbosityGNC verbosityGNC = SILENT; /* verbosity level */
+  VerbosityGNC verbosityGNC = SILENT; /* verbosity level */
   std::vector<size_t> knownInliers = std::vector<size_t>(); /* slots in the factor graph corresponding to measurements that we know are inliers */
 
-  void setLossType(const RobustLossType type){ lossType = type; }
-  void setMaxIterations(const size_t maxIter){
-    std::cout
-        << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
-        << std::endl;
+  void setLossType(const RobustLossType type) {
+    lossType = type;
+  }
+  void setMaxIterations(const size_t maxIter) {
+    std::cout
+        << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
" + << std::endl; maxIterations = maxIter; } - void setInlierThreshold(const double inth){ barcSq = inth; } - void setMuStep(const double step){ muStep = step; } - void setVerbosityGNC(const VerbosityGNC verbosity) { verbosityGNC = verbosity; } + void setInlierThreshold(const double inth) { + barcSq = inth; + } + void setMuStep(const double step) { + muStep = step; + } + void setVerbosityGNC(const VerbosityGNC verbosity) { + verbosityGNC = verbosity; + } void setKnownInliers(const std::vector knownIn) { - for(size_t i=0; i(graph[i]); - noiseModel::Robust::shared_ptr robust = boost::dynamic_pointer_cast(factor->noiseModel()); - if(robust){ // if the factor has a robust loss, we have to change it: + if (graph[i]) { + NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast< + NoiseModelFactor>(graph[i]); + noiseModel::Robust::shared_ptr robust = boost::dynamic_pointer_cast< + noiseModel::Robust>(factor->noiseModel()); + if (robust) { // if the factor has a robust loss, we have to change it: SharedNoiseModel gaussianNoise = robust->noise(); - NoiseModelFactor::shared_ptr gaussianFactor = factor->cloneWithNewNoiseModel(gaussianNoise); + NoiseModelFactor::shared_ptr gaussianFactor = + factor->cloneWithNewNoiseModel(gaussianNoise); nfg_[i] = gaussianFactor; - } else{ // else we directly push it back + } else { // else we directly push it back nfg_[i] = factor; } } @@ -145,22 +159,30 @@ public: } /// getter functions - NonlinearFactorGraph getFactors() const { return NonlinearFactorGraph(nfg_); } - Values getState() const { return Values(state_); } - GncParameters getParams() const { return GncParameters(params_); } - Vector getWeights() const {return weights_;} + NonlinearFactorGraph getFactors() const { + return NonlinearFactorGraph(nfg_); + } + Values getState() const { + return Values(state_); + } + GncParameters getParams() const { + return GncParameters(params_); + } + Vector getWeights() const { + return weights_; + } /// implement GNC main loop, including graduating nonconvexity with mu Values optimize() { // start by assuming all measurements are inliers weights_ = Vector::Ones(nfg_.size()); - GaussNewtonOptimizer baseOptimizer(nfg_,state_); + GaussNewtonOptimizer baseOptimizer(nfg_, state_); Values result = baseOptimizer.optimize(); double mu = initializeMu(); - for(size_t iter=0; iter < params_.maxIterations; iter++){ + for (size_t iter = 0; iter < params_.maxIterations; iter++) { // display info - if (params_.verbosityGNC >= GncParameters::VerbosityGNC::VALUES){ + if (params_.verbosityGNC >= GncParameters::VerbosityGNC::VALUES) { result.print("result\n"); std::cout << "mu: " << mu << std::endl; std::cout << "weights: " << weights_ << std::endl; @@ -174,9 +196,9 @@ public: result = baseOptimizer_iter.optimize(); // stopping condition - if( checkMuConvergence(mu) ) { + if (checkMuConvergence(mu)) { // display info - if (params_.verbosityGNC >= GncParameters::VerbosityGNC::SUMMARY){ + if (params_.verbosityGNC >= GncParameters::VerbosityGNC::SUMMARY) { std::cout << "final iterations: " << iter << std::endl; std::cout << "final mu: " << mu << std::endl; std::cout << "final weights: " << weights_ << std::endl; @@ -195,14 +217,14 @@ public: // compute largest error across all factors double rmax_sq = 0.0; for (size_t i = 0; i < nfg_.size(); i++) { - if(nfg_[i]){ + if (nfg_[i]) { rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_)); } } // set initial mu - switch(params_.lossType) { + switch (params_.lossType) { case GncParameters::GM: - return 2*rmax_sq / params_.barcSq; // initial 
+      return 2 * rmax_sq / params_.barcSq; // initial mu
     default:
       throw std::runtime_error(
           "GncOptimizer::initializeMu: called with unknown loss type.");
@@ -211,9 +233,9 @@
 
   /// update the gnc parameter mu to gradually increase nonconvexity
   double updateMu(const double mu) const {
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM:
-      return std::max(1.0 , mu / params_.muStep); // reduce mu, but saturate at 1
+      return std::max(1.0, mu / params_.muStep); // reduce mu, but saturate at 1
     default:
       throw std::runtime_error(
           "GncOptimizer::updateMu: called with unknown loss type.");
@@ -222,7 +244,7 @@
 
   /// check if we have reached the value of mu for which the surrogate loss matches the original loss
   bool checkMuConvergence(const double mu) const {
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM:
       return std::fabs(mu - 1.0) < 1e-9; // mu=1 recovers the original GM function
     default:
@@ -237,16 +259,20 @@
     NonlinearFactorGraph newGraph;
     newGraph.resize(nfg_.size());
     for (size_t i = 0; i < nfg_.size(); i++) {
-      if(nfg_[i]){
-        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<NoiseModelFactor>(nfg_[i]);
-        noiseModel::Gaussian::shared_ptr noiseModel = boost::dynamic_pointer_cast<noiseModel::Gaussian>(factor->noiseModel());
-        if(noiseModel){
+      if (nfg_[i]) {
+        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
+            NoiseModelFactor>(nfg_[i]);
+        noiseModel::Gaussian::shared_ptr noiseModel =
+            boost::dynamic_pointer_cast<noiseModel::Gaussian>(
+                factor->noiseModel());
+        if (noiseModel) {
           Matrix newInfo = weights[i] * noiseModel->information();
-          SharedNoiseModel newNoiseModel = noiseModel::Gaussian::Information(newInfo);
+          SharedNoiseModel newNoiseModel = noiseModel::Gaussian::Information(
+              newInfo);
           newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
-        }else{
+        } else {
           throw std::runtime_error(
-              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
+              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
         }
       }
     }
@@ -254,24 +280,27 @@
   }
 
   /// calculate gnc weights
-  Vector calculateWeights(const Values currentEstimate, const double mu){
+  Vector calculateWeights(const Values currentEstimate, const double mu) {
     Vector weights = Vector::Ones(nfg_.size());
 
     // do not update the weights that the user has decided are known inliers
     std::vector<size_t> allWeights;
-    for (size_t k = 0; k < nfg_.size(); k++) {allWeights.push_back(k);}
+    for (size_t k = 0; k < nfg_.size(); k++) {
+      allWeights.push_back(k);
+    }
     std::vector<size_t> unknownWeights;
     std::set_difference(allWeights.begin(), allWeights.end(),
        params_.knownInliers.begin(), params_.knownInliers.end(),
        std::inserter(unknownWeights, unknownWeights.begin()));
 
     // update weights of known inlier/outlier measurements
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM: // use eq (12) in GNC paper
       for (size_t k : unknownWeights) {
-        if(nfg_[k]){
+        if (nfg_[k]) {
           double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
-          weights[k] = std::pow( ( mu*params_.barcSq )/( u2_k + mu*params_.barcSq ) , 2);
+          weights[k] = std::pow(
+              (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
         }
       }
       return weights;
@@ -284,7 +313,6 @@
 
 /* ************************************************************************* */
 TEST(GncOptimizer, gncParamsConstructor) {
-  //check params are correctly parsed
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
@@ -296,7 +324,7 @@
 
   // and check params become different if we change lmParams
   lmParams.setVerbosity("DELTA");
-  CHECK(! lmParams.equals(gncParams1.baseOptimizerParams));
+  CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));
 
   // and same for GN
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams2(gnParams);
@@ -310,7 +338,7 @@
   // change something at the gncParams level
   GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
   gncParams2c.setLossType(GncParams<GaussNewtonParams>::RobustLossType::TLS);
-  CHECK(! gncParams2c.equals(gncParams2b.baseOptimizerParams));
+  CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
 }
 
 /* ************************************************************************* */
@@ -324,7 +352,8 @@ TEST(GncOptimizer, gncConstructor) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   CHECK(gnc.getFactors().equals(fg));
   CHECK(gnc.getState().equals(initial));
@@ -333,7 +362,6 @@
 
 /* ************************************************************************* */
 TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
-  // simple graph with Gaussian noise model
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
   // same graph with robust noise model
   auto fg_robust = example::sharedRobustFactorGraphWithOutliers();
@@ -344,15 +372,15 @@
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
+      initial, gncParams);
 
   // make sure that when parsing the graph is transformed into one without robust loss
-  CHECK( fg.equals(gnc.getFactors()) );
+  CHECK(fg.equals(gnc.getFactors()));
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, initializeMu) {
-  // has to have Gaussian noise models !
   auto fg = example::createReallyNonlinearFactorGraph();
 
   Point2 p0(3, 3);
   Values initial;
   initial.insert(X(1), p0);
@@ -361,8 +389,10 @@
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   EXPECT_DOUBLES_EQUAL(gnc.initializeMu(), 2 * 198.999, 1e-3); // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq (barcSq=1 in this example)
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, updateMu) {
   auto fg = example::createReallyNonlinearFactorGraph();
 
   Point2 p0(3, 3);
   Values initial;
   initial.insert(X(1), p0);
@@ -377,8 +407,10 @@
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   double mu = 5.0;
   EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, checkMuConvergence) {
   auto fg = example::createReallyNonlinearFactorGraph();
 
   Point2 p0(3, 3);
   Values initial;
   initial.insert(X(1), p0);
@@ -399,8 +431,10 @@
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   double mu = 1.0;
   CHECK(gnc.checkMuConvergence(mu));
}
 
@@ -408,67 +442,69 @@
 /* ************************************************************************* */
 TEST(GncOptimizer, calculateWeights) {
-  // has to have Gaussian noise models !
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
 
   Point2 p0(0, 0);
   Values initial;
   initial.insert(X(1), p0);
 
   // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 * 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
   Vector weights_expected = Vector::Zero(4);
   weights_expected[0] = 1.0; // zero error
   weights_expected[1] = 1.0; // zero error
   weights_expected[2] = 1.0; // zero error
-  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0),2); // outlier, error = 50
+  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50
 
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   double mu = 1.0;
-  Vector weights_actual = gnc.calculateWeights(initial,mu);
+  Vector weights_actual = gnc.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
 
   mu = 2.0;
   double barcSq = 5.0;
-  weights_expected[3] = std::pow(mu*barcSq / (50.0 + mu*barcSq),2); // outlier, error = 50
+  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
   gncParams.setInlierThreshold(barcSq);
-  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+      gncParams);
   weights_actual = gnc2.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, makeWeightedGraph) {
   // create original factor
   double sigma1 = 0.1;
-  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(sigma1);
+  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
+      sigma1);
 
   // create expected
   double sigma2 = 10;
-  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(sigma2);
+  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
+      sigma2);
 
   // create weights
   Vector weights = Vector::Ones(1); // original info: 1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10^-4
   weights[0] = 1e-4;
 
   // create actual
   Point2 p0(3, 3);
   Values initial;
   initial.insert(X(1), p0);
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
+      gncParams);
   NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);
 
   // check it's all good
   CHECK(assert_equal(expected, actual));
 }
 
 /* ************************************************************************* */
@@ -477,7 +513,8 @@ TEST(GncOptimizer, optimizeSimple) {
   auto fg = example::createReallyNonlinearFactorGraph();
 
   Point2 p0(3, 3);
   Values initial;
   initial.insert(X(1), p0);
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
   Values actual = gnc.optimize();
   DOUBLES_EQUAL(0, fg.error(actual), tol);
 }
 
 /* ************************************************************************* */
@@ -485,7 +522,6 @@ TEST(GncOptimizer, optimize) {
-  // has to have Gaussian noise models !
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
 
   Point2 p0(1, 0);
   Values initial;
   initial.insert(X(1), p0);
@@ -497,26 +533,25 @@
 
   // try with nonrobust cost function and standard GN
   GaussNewtonParams gnParams;
   GaussNewtonOptimizer gn(fg, initial, gnParams);
   Values gn_results = gn.optimize();
   // converges to incorrect point due to lack of robustness to an outlier, ideal solution is Point2(0,0)
-  CHECK(assert_equal(Point2(0.25,0.0), gn_results.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
 
   // try with robust loss function and standard GN
   auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with factors wrapped in Geman McClure losses
   GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
   Values gn2_results = gn2.optimize();
   // converges to incorrect point, this time due to the nonconvexity of the loss
-  CHECK(assert_equal(Point2(0.999706,0.0), gn2_results.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
 
   // .. but graduated nonconvexity ensures both robustness and convergence in the face of nonconvexity
   GncParams<GaussNewtonParams> gncParams(gnParams);
   // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::VerbosityGNC::SUMMARY);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.0,0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, optimizeWithKnownInliers) {
-  // has to have Gaussian noise models !
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
 
   Point2 p0(1, 0);
   Values initial;
   initial.insert(X(1), p0);
@@ -535,7 +570,7 @@
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.0,0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 
   // check weights were actually fixed:
   Vector finalWeights = gnc.getWeights();
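
For readers trying out the patched API, below is a minimal end-to-end sketch of the GNC loop these tests exercise: build a graph with Gaussian noise models, wrap the base optimizer's parameters in GncParams, and run GncOptimizer. It assumes the GncParams/GncOptimizer templates defined in testGncOptimizer.cpp above plus standard GTSAM 4.x headers; the hand-built PriorFactor graph, the 0.1 sigma, and the outlier at (10, 10) are illustrative stand-ins for the example:: helper graphs, which are not part of this patch.

// Minimal usage sketch (assumptions: GncParams/GncOptimizer as defined in
// the test file above; GTSAM 4.x; the graph below is illustrative, not the
// example:: fixtures used by the tests).
#include <gtsam/geometry/Point2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/PriorFactor.h>

using namespace gtsam;
using symbol_shorthand::X;

int main() {
  // three agreeing measurements on X(1) (inliers) and one gross outlier
  auto noise = noiseModel::Isotropic::Sigma(2, 0.1);
  NonlinearFactorGraph fg;
  fg.add(PriorFactor<Point2>(X(1), Point2(0, 0), noise));
  fg.add(PriorFactor<Point2>(X(1), Point2(0, 0), noise));
  fg.add(PriorFactor<Point2>(X(1), Point2(0, 0), noise));
  fg.add(PriorFactor<Point2>(X(1), Point2(10, 10), noise)); // outlier

  Values initial;
  initial.insert(X(1), Point2(1, 0));

  // wrap the base optimizer settings in GNC parameters
  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  gncParams.setLossType(GncParams<GaussNewtonParams>::RobustLossType::GM);
  gncParams.setKnownInliers({0, 1, 2}); // factor slots we trust a priori

  // run graduated nonconvexity: alternates weight and variable updates
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  Values estimate = gnc.optimize(); // robust estimate, near (0, 0)
  Vector weights = gnc.getWeights(); // weight of slot 3 driven toward 0
  estimate.print("gnc estimate\n");
  return 0;
}

Per eq (12) used in calculateWeights above, the outlier's weight is (mu * barcSq / (u2 + mu * barcSq))^2, so factors whose whitened squared error u2 far exceeds barcSq are smoothly switched off as mu is reduced toward 1.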