From d0a81f8441ba91eab1bbfdf7af1fa0d69db99c54 Mon Sep 17 00:00:00 2001
From: jingnanshi
Date: Mon, 7 Dec 2020 16:04:36 -0500
Subject: [PATCH] mu initialization test & minor formatting fixes

---
 tests/testGncOptimizer.cpp | 135 +++++++++++++++++++++++--------------
 1 file changed, 84 insertions(+), 51 deletions(-)

diff --git a/tests/testGncOptimizer.cpp b/tests/testGncOptimizer.cpp
index 3f784b96e..ca40231c9 100644
--- a/tests/testGncOptimizer.cpp
+++ b/tests/testGncOptimizer.cpp
@@ -16,29 +16,32 @@
  * @author Luca Carlone
  * @author Frank Dellaert
  *
- * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception:
- * From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf)
+ * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated
+ * Non-Convexity for Robust Spatial Perception: From Non-Minimal Solvers to
+ * Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version:
+ * https://arxiv.org/pdf/1909.08605.pdf)
  *
  * See also:
- * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications",
- * arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020.
+ * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness,
+ * Minimally-Tuned Algorithms, and Applications", arxiv:
+ * https://arxiv.org/pdf/2007.15109.pdf, 2020.
  */

-#include <tests/smallExample.h>
-#include <gtsam/slam/dataset.h>
-#include <gtsam/nonlinear/GncOptimizer.h>
 #include <CppUnitLite/TestHarness.h>
+#include <gtsam/nonlinear/GncOptimizer.h>
+#include <gtsam/slam/dataset.h>
+#include <tests/smallExample.h>

 using namespace std;
 using namespace gtsam;

-using symbol_shorthand::X;
 using symbol_shorthand::L;
+using symbol_shorthand::X;

 static double tol = 1e-7;

 /* ************************************************************************* */
 TEST(GncOptimizer, gncParamsConstructor) {
-  //check params are correctly parsed
+  // check params are correctly parsed
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
   CHECK(lmParams.equals(gncParams1.baseOptimizerParams));
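A note on the contract this first test pins down: GncParams is templated on the base optimizer's parameter type and stores a copy of whatever it is constructed from. A minimal sketch of that round trip, using only the headers this test file already includes (an illustrative sketch, not test code from the patch):

#include <gtsam/nonlinear/GncOptimizer.h>

using namespace gtsam;

int main() {
  // GncParams<BaseOptimizerParameters> copies the base parameters it is
  // given and exposes them as baseOptimizerParams; gncParamsConstructor
  // asserts exactly this equality.
  LevenbergMarquardtParams lmParams;
  lmParams.maxIterations = 100;  // any base setting survives the wrap
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  return lmParams.equals(gncParams.baseOptimizerParams) ? 0 : 1;  // 0 = equal
}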
@@ -69,7 +72,8 @@ TEST(GncOptimizer, gncParamsConstructor) {
 /* ************************************************************************* */
 TEST(GncOptimizer, gncConstructor) {
   // has to have Gaussian noise models !
-  auto fg = example::createReallyNonlinearFactorGraph(); // just a unary factor on a 2D point
+  auto fg = example::createReallyNonlinearFactorGraph();  // just a unary factor
+                                                          // on a 2D point

   Point2 p0(3, 3);
   Values initial;
@@ -77,8 +81,8 @@ TEST(GncOptimizer, gncConstructor) {
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
-      gncParams);
+  auto gnc =
+      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);

   CHECK(gnc.getFactors().equals(fg));
   CHECK(gnc.getState().equals(initial));
@@ -97,10 +101,11 @@ TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
-      initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(
+      fg_robust, initial, gncParams);

-  // make sure that when parsing the graph is transformed into one without robust loss
+  // make sure that when parsing the graph is transformed into one without
+  // robust loss
   CHECK(fg.equals(gnc.getFactors()));
 }

@@ -112,13 +117,25 @@ TEST(GncOptimizer, initializeMu) {
   Values initial;
   initial.insert(X(1), p0);

+  // testing GM mu initialization
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
   gncParams.setLossType(
       GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
-      gncParams);
-  EXPECT_DOUBLES_EQUAL(gnc.initializeMu(), 2 * 198.999, 1e-3); // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq (barcSq=1 in this example)
+  auto gnc_gm =
+      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  // according to rmk 5 in the gnc paper: mu0 = 2 * rmax^2 / barcSq
+  // (barcSq=1 in this example)
+  EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
+
+  // testing TLS mu initialization
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::TLS);
+  auto gnc_tls =
+      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  // according to rmk 5 in the gnc paper: mu0 = barcSq / (2 * rmax^2 - barcSq)
+  // (barcSq=1 in this example)
+  EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
 }

 /* ************************************************************************* */
@@ -134,8 +151,8 @@ TEST(GncOptimizer, updateMu) {
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
   gncParams.setLossType(
       GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
-      gncParams);
+  auto gnc =
+      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);

   double mu = 5.0;
   EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
@@ -158,8 +175,8 @@ TEST(GncOptimizer, checkMuConvergence) {
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
   gncParams.setLossType(
       GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
-      gncParams);
+  auto gnc =
+      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);

   double mu = 1.0;
   CHECK(gnc.checkMuConvergence(mu, 0));
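The three mu tests above fix the GNC schedule: Remark 5 of the GNC paper gives the initialization for each surrogate loss, and for GM the optimizer then divides mu by a constant step (1.4 by default) until the surrogate coincides with the original robust loss at mu = 1. A self-contained sketch of the arithmetic being asserted, with rmax^2 = 198.999 and barcSq = 1 taken from the test fixture (the helper functions are illustrative stand-ins, not the GncOptimizer API):

#include <cmath>
#include <iostream>

// Remark 5 initializations (stand-ins for GncOptimizer::initializeMu()):
//   GM:  mu0 = 2 * rmax^2 / barcSq           (starts very convex)
//   TLS: mu0 = barcSq / (2 * rmax^2 - barcSq)
double initializeMuGM(double rmax2, double barcSq) {
  return 2.0 * rmax2 / barcSq;
}
double initializeMuTLS(double rmax2, double barcSq) {
  return barcSq / (2.0 * rmax2 - barcSq);
}
// GM update: divide by the mu step (1.4 by default); converged at mu = 1.
double updateMuGM(double mu, double muStep = 1.4) { return mu / muStep; }

int main() {
  const double rmax2 = 198.999;  // largest squared residual in the fixture
  const double barcSq = 1.0;     // inlier threshold in the fixture
  std::cout << initializeMuGM(rmax2, barcSq) << "\n";   // 397.998
  std::cout << initializeMuTLS(rmax2, barcSq) << "\n";  // 1/396.998 ~ 0.00252
  std::cout << updateMuGM(5.0) << "\n";                 // 5/1.4, as in updateMu
}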
@@ -175,11 +192,12 @@ TEST(GncOptimizer, calculateWeights) {
   Values initial;
   initial.insert(X(1), p0);

-  // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 * 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
+  // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 *
+  // 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
   Vector weights_expected = Vector::Zero(4);
-  weights_expected[0] = 1.0; // zero error
-  weights_expected[1] = 1.0; // zero error
-  weights_expected[2] = 1.0; // zero error
+  weights_expected[0] = 1.0;  // zero error
+  weights_expected[1] = 1.0;  // zero error
+  weights_expected[2] = 1.0;  // zero error
   weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2);  // outlier, error = 50

   GaussNewtonParams gnParams;
@@ -191,10 +209,11 @@ TEST(GncOptimizer, calculateWeights) {
   mu = 2.0;
   double barcSq = 5.0;
-  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
+  weights_expected[3] =
+      std::pow(mu * barcSq / (50.0 + mu * barcSq), 2);  // outlier, error = 50

   gncParams.setInlierThreshold(barcSq);
-  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
-      gncParams);
+  auto gnc2 =
+      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   weights_actual = gnc2.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
 }

@@ -203,16 +222,17 @@ TEST(GncOptimizer, makeWeightedGraph) {
   // create original factor
   double sigma1 = 0.1;
-  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
-      sigma1);
+  NonlinearFactorGraph nfg =
+      example::nonlinearFactorGraphWithGivenSigma(sigma1);

   // create expected
   double sigma2 = 10;
-  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
-      sigma2);
+  NonlinearFactorGraph expected =
+      example::nonlinearFactorGraphWithGivenSigma(sigma2);

   // create weights
-  Vector weights = Vector::Ones(1); // original info:1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10-4
+  Vector weights = Vector::Ones(
+      1);  // original info: 1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 1e-4
   weights[0] = 1e-4;

   // create actual
@@ -223,7 +243,7 @@ TEST(GncOptimizer, makeWeightedGraph) {
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
   auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
-      gncParams);
+                                                               gncParams);
   NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);

   // check it's all good
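Both weight assertions above instantiate the same Geman-McClure weight expression from the GNC paper, and makeWeightedGraph then folds such weights back into the factor noise models. A worked check of the numbers the two tests use (the helper is an illustrative stand-in for calculateWeights, not GTSAM API):

#include <cmath>
#include <iostream>

// GM weight for a factor with squared residual r2:
//   w = ( mu * barcSq / (r2 + mu * barcSq) )^2
double gmWeight(double r2, double mu, double barcSq) {
  return std::pow(mu * barcSq / (r2 + mu * barcSq), 2);
}

int main() {
  // First assertion: outlier error 50 with mu * barcSq = 1 -> (1/51)^2.
  std::cout << gmWeight(50.0, 1.0, 1.0) << "\n";
  // Second assertion: mu = 2, barcSq = 5 -> (10/60)^2.
  std::cout << gmWeight(50.0, 2.0, 5.0) << "\n";
  // makeWeightedGraph: a weight w scales a factor's information by w,
  // i.e. sigma -> sigma / sqrt(w); here 0.1 / sqrt(1e-4) = 10.
  std::cout << 0.1 / std::sqrt(1e-4) << "\n";
}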
@@ -240,8 +260,8 @@ TEST(GncOptimizer, optimizeSimple) {
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
-      gncParams);
+  auto gnc =
+      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);

   Values actual = gnc.optimize();
   DOUBLES_EQUAL(0, fg.error(actual), tol);
@@ -259,17 +279,23 @@ TEST(GncOptimizer, optimize) {
   GaussNewtonParams gnParams;
   GaussNewtonOptimizer gn(fg, initial, gnParams);
   Values gn_results = gn.optimize();
-  // converges to incorrect point due to lack of robustness to an outlier, ideal solution is Point2(0,0)
+  // converges to incorrect point due to lack of robustness to an outlier, ideal
+  // solution is Point2(0,0)
   CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));

   // try with robust loss function and standard GN
-  auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with factors wrapped in Geman McClure losses
+  auto fg_robust =
+      example::sharedRobustFactorGraphWithOutliers();  // same as fg, but with
+                                                       // factors wrapped in
+                                                       // Geman McClure losses
   GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
   Values gn2_results = gn2.optimize();
   // converges to incorrect point, this time due to the nonconvexity of the loss
-  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
+  CHECK(
+      assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));

-  // .. but graduated nonconvexity ensures both robustness and convergence in the face of nonconvexity
+  // .. but graduated nonconvexity ensures both robustness and convergence in
+  // the face of nonconvexity
   GncParams<GaussNewtonParams> gncParams(gnParams);
   // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::VerbosityGNC::SUMMARY);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);

@@ -315,31 +341,38 @@ TEST(GncOptimizer, optimizeSmallPoseGraph) {
   boost::tie(graph, initial) = load2D(filename);
   // Add a Gaussian prior on first poses
   Pose2 priorMean(0.0, 0.0, 0.0);  // prior at origin
-  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.01, 0.01, 0.01));
-  graph -> addPrior(0, priorMean, priorNoise);
+  SharedDiagonal priorNoise =
+      noiseModel::Diagonal::Sigmas(Vector3(0.01, 0.01, 0.01));
+  graph->addPrior(0, priorMean, priorNoise);

   /// get expected values by optimizing outlier-free graph
   Values expected = LevenbergMarquardtOptimizer(*graph, *initial).optimize();

   // add a few outliers
-  SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.01));
-  graph->push_back( BetweenFactor<Pose2>(90 , 50 , Pose2(), betweenNoise) ); // some arbitrary and incorrect between factor
+  SharedDiagonal betweenNoise =
+      noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.01));
+  graph->push_back(BetweenFactor<Pose2>(
+      90, 50, Pose2(),
+      betweenNoise));  // some arbitrary and incorrect between factor

   /// get values by optimizing the graph that now contains the outlier
-  Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial).optimize();
+  Values expectedWithOutliers =
+      LevenbergMarquardtOptimizer(*graph, *initial).optimize();
   // as expected, the following test fails due to the presence of an outlier!
   // CHECK(assert_equal(expected, expectedWithOutliers, 1e-3));

   // GNC
-  // Note: in difficult instances, we set the odometry measurements to be inliers,
-  // but this problem is simple enought to succeed even without that assumption
-  // std::vector<size_t> knownInliers;
+  // Note: in difficult instances, we set the odometry measurements to be
+  // inliers, but this problem is simple enough to succeed even without that
+  // assumption.
+  // std::vector<size_t> knownInliers;
   GncParams<GaussNewtonParams> gncParams;
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial, gncParams);
+  auto gnc =
+      GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial, gncParams);
   Values actual = gnc.optimize();

   // compare
-  CHECK(assert_equal(expected, actual, 1e-3)); // yay! we are robust to outliers!
+  CHECK(
+      assert_equal(expected, actual, 1e-3));  // yay! we are robust to outliers!
 }

 /* ************************************************************************* */
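For orientation, the full pipeline exercised by the optimize tests can be driven in a few lines. A minimal sketch in the same style; the fixture name is inferred from the sharedRobustFactorGraphWithOutliers sibling referenced above and the initial guess is assumed, so treat both as illustrative rather than excerpts from the test:

#include <gtsam/nonlinear/GncOptimizer.h>
#include <tests/smallExample.h>

using namespace gtsam;
using symbol_shorthand::X;

int main() {
  // Unary factors on a 2D point: three inliers plus one outlier
  // (assumed fixture name, the non-robust sibling of the graph above).
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Values initial;
  initial.insert(X(1), Point2(1, 0));  // assumed guess, near the outlier

  // Wrap a plain Gauss-Newton inner solve in the GNC outer loop.
  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  auto gnc =
      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);

  Values estimate = gnc.optimize();  // the test expects Point2(0, 0) here
  estimate.print("GNC estimate:\n");
  return 0;
}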