diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h
index bbc3b9f84..cfabf0ab6 100644
--- a/gtsam/nonlinear/GncOptimizer.h
+++ b/gtsam/nonlinear/GncOptimizer.h
@@ -176,10 +176,10 @@ class GncOptimizer {
     }
     // set initial mu
     switch (params_.lossType) {
-      case GncParameters::GM:
+      case GncLossType::GM:
         // surrogate cost is convex for large mu
         return 2 * rmax_sq / params_.barcSq;  // initial mu
-      case GncParameters::TLS:
+      case GncLossType::TLS:
         /* initialize mu to the value specified in Remark 5 in GNC paper.
          surrogate cost is convex for mu close to zero
          degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop)
@@ -198,10 +198,10 @@ class GncOptimizer {
   /// Update the gnc parameter mu to gradually increase nonconvexity.
   double updateMu(const double mu) const {
     switch (params_.lossType) {
-      case GncParameters::GM:
+      case GncLossType::GM:
         // reduce mu, but saturate at 1 (original cost is recovered for mu -> 1)
         return std::max(1.0, mu / params_.muStep);
-      case GncParameters::TLS:
+      case GncLossType::TLS:
         // increases mu at each iteration (original cost is recovered for mu -> inf)
         return mu * params_.muStep;
       default:
@@ -214,10 +214,10 @@ class GncOptimizer {
   bool checkMuConvergence(const double mu) const {
     bool muConverged = false;
     switch (params_.lossType) {
-      case GncParameters::GM:
+      case GncLossType::GM:
         muConverged = std::fabs(mu - 1.0) < 1e-9;  // mu=1 recovers the original GM function
         break;
-      case GncParameters::TLS:
+      case GncLossType::TLS:
         muConverged = false;  // for TLS there is no stopping condition on mu (it must tend to infinity)
         break;
       default:
@@ -242,10 +242,10 @@ class GncOptimizer {
   bool checkWeightsConvergence(const Vector& weights) const {
     bool weightsConverged = false;
     switch (params_.lossType) {
-      case GncParameters::GM:
+      case GncLossType::GM:
         weightsConverged = false;  // for GM, there is no clear binary convergence for the weights
         break;
-      case GncParameters::TLS:
+      case GncLossType::TLS:
         weightsConverged = true;
         for (size_t i = 0; i < weights.size(); i++) {
           if (std::fabs(weights[i] - std::round(weights[i]))
@@ -315,7 +315,7 @@ class GncOptimizer {
 
     // update weights of known inlier/outlier measurements
     switch (params_.lossType) {
-      case GncParameters::GM: {  // use eq (12) in GNC paper
+      case GncLossType::GM: {  // use eq (12) in GNC paper
         for (size_t k : unknownWeights) {
           if (nfg_[k]) {
             double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
@@ -325,7 +325,7 @@ class GncOptimizer {
         }
         return weights;
       }
-      case GncParameters::TLS: {  // use eq (14) in GNC paper
+      case GncLossType::TLS: {  // use eq (14) in GNC paper
         double upperbound = (mu + 1) / mu * params_.barcSq;
         double lowerbound = mu / (mu + 1) * params_.barcSq;
         for (size_t k : unknownWeights) {
diff --git a/gtsam/nonlinear/GncParams.h b/gtsam/nonlinear/GncParams.h
index 9c4f21b81..5f130ddf2 100644
--- a/gtsam/nonlinear/GncParams.h
+++ b/gtsam/nonlinear/GncParams.h
@@ -32,6 +32,12 @@
 namespace gtsam {
 
 /* ************************************************************************* */
+/// Choice of robust loss function for GNC.
+enum GncLossType {
+  GM /*Geman McClure*/,
+  TLS /*Truncated least squares*/
+};
+
 template<class BaseOptimizerParameters>
 class GncParams {
  public:
@@ -45,12 +51,6 @@ class GncParams {
     VALUES
   };
 
-  /// Choice of robust loss function for GNC.
-  enum GncLossType {
-    GM /*Geman McClure*/,
-    TLS /*Truncated least squares*/
-  };
-
   /// Constructor.
   GncParams(const BaseOptimizerParameters& baseOptimizerParams)
       : baseOptimizerParams(baseOptimizerParams) {
diff --git a/tests/testGncOptimizer.cpp b/tests/testGncOptimizer.cpp
index f46563b91..738c77936 100644
--- a/tests/testGncOptimizer.cpp
+++ b/tests/testGncOptimizer.cpp
@@ -66,7 +66,7 @@ TEST(GncOptimizer, gncParamsConstructor) {
 
   // change something at the gncParams level
   GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
-  gncParams2c.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+  gncParams2c.setLossType(GncLossType::GM);
   CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
 }
 
@@ -119,7 +119,7 @@ TEST(GncOptimizer, initializeMu) {
 
   // testing GM mu initialization
   GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+  gncParams.setLossType(GncLossType::GM);
   auto gnc_gm = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                            gncParams);
   // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq
@@ -127,7 +127,7 @@ TEST(GncOptimizer, initializeMu) {
   EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
 
   // testing TLS mu initialization
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  gncParams.setLossType(GncLossType::TLS);
   auto gnc_tls = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                             gncParams);
   // according to rmk 5 in the gnc paper: m0 = barcSq / (2 * rmax^2 - barcSq)
@@ -145,7 +145,7 @@ TEST(GncOptimizer, updateMuGM) {
   initial.insert(X(1), p0);
 
   GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+  gncParams.setLossType(GncLossType::GM);
   gncParams.setMuStep(1.4);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                         gncParams);
@@ -169,7 +169,7 @@ TEST(GncOptimizer, updateMuTLS) {
 
   GncParams<GaussNewtonParams> gncParams;
   gncParams.setMuStep(1.4);
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  gncParams.setLossType(GncLossType::TLS);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                         gncParams);
 
@@ -188,7 +188,7 @@ TEST(GncOptimizer, checkMuConvergence) {
 
   {
     GncParams<GaussNewtonParams> gncParams;
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+    gncParams.setLossType(GncLossType::GM);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
 
@@ -198,7 +198,7 @@ TEST(GncOptimizer, checkMuConvergence) {
   {
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setLossType(
-        GncParams<GaussNewtonParams>::GncLossType::TLS);
+        GncLossType::TLS);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
 
@@ -251,7 +251,7 @@ TEST(GncOptimizer, checkWeightsConvergence) {
 
   {
     GncParams<GaussNewtonParams> gncParams;
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+    gncParams.setLossType(GncLossType::GM);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
 
@@ -261,7 +261,7 @@ TEST(GncOptimizer, checkWeightsConvergence) {
   {
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setLossType(
-        GncParams<GaussNewtonParams>::GncLossType::TLS);
+        GncLossType::TLS);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
 
@@ -272,7 +272,7 @@ TEST(GncOptimizer, checkWeightsConvergence) {
   {
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setLossType(
-        GncParams<GaussNewtonParams>::GncLossType::TLS);
+        GncLossType::TLS);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
 
@@ -283,7 +283,7 @@ TEST(GncOptimizer, checkWeightsConvergence) {
   {
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setLossType(
-        GncParams<GaussNewtonParams>::GncLossType::TLS);
+        GncLossType::TLS);
     gncParams.setWeightsTol(0.1);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
@@ -305,7 +305,7 @@ TEST(GncOptimizer, checkConvergenceTLS) {
 
   GncParams<GaussNewtonParams> gncParams;
   gncParams.setRelativeCostTol(1e-5);
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  gncParams.setLossType(GncLossType::TLS);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                         gncParams);
 
@@ -331,7 +331,7 @@ TEST(GncOptimizer, calculateWeightsGM) {
 
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+  gncParams.setLossType(GncLossType::GM);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   double mu = 1.0;
   Vector weights_actual = gnc.calculateWeights(initial, mu);
@@ -364,7 +364,7 @@ TEST(GncOptimizer, calculateWeightsTLS) {
 
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
-  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  gncParams.setLossType(GncLossType::TLS);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   double mu = 1.0;
   Vector weights_actual = gnc.calculateWeights(initial, mu);
@@ -397,7 +397,7 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
     // actual:
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setLossType(GncLossType::TLS);
     gncParams.setInlierCostThreshold(0.51);  // if inlier threshold is slightly larger than 0.5, then measurement is inlier
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                           gncParams);
@@ -413,7 +413,7 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
     // actual:
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setLossType(GncLossType::TLS);
     gncParams.setInlierCostThreshold(0.49);  // if inlier threshold is slightly below 0.5, then measurement is outlier
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                           gncParams);
@@ -429,7 +429,7 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
     // actual:
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setLossType(GncLossType::TLS);
     gncParams.setInlierCostThreshold(0.5);  // if inlier threshold is slightly below 0.5, then measurement is outlier
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                           gncParams);
@@ -538,7 +538,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   {
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setKnownInliers(knownInliers);
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+    gncParams.setLossType(GncLossType::GM);
     //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
@@ -555,7 +555,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   {
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setKnownInliers(knownInliers);
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setLossType(GncLossType::TLS);
     // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
@@ -574,7 +574,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
     // if we set the threshold large, they are all inliers
     GncParams<GaussNewtonParams> gncParams;
     gncParams.setKnownInliers(knownInliers);
-    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setLossType(GncLossType::TLS);
     //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
     gncParams.setInlierCostThreshold(100.0);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
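
Usage note (not part of the diff): with GncLossType now declared at namespace scope, callers select the robust loss as gtsam::GncLossType::GM or gtsam::GncLossType::TLS instead of the old nested GncParams<...>::GncLossType spelling. Below is a minimal sketch of the new call pattern; the helper name robustSolve and the inputs fg/initial are illustrative placeholders, and it assumes a GTSAM build that provides GncOptimizer.

#include <gtsam/nonlinear/GncOptimizer.h>

using namespace gtsam;

// Configure GNC with the truncated-least-squares loss and run it on an
// existing nonlinear problem (fg and initial are supplied by the caller).
Values robustSolve(const NonlinearFactorGraph& fg, const Values& initial) {
  GncParams<GaussNewtonParams> gncParams;
  gncParams.setLossType(GncLossType::TLS);  // was GncParams<GaussNewtonParams>::GncLossType::TLS
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  return gnc.optimize();
}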