diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h
index f28394545..bbc3b9f84 100644
--- a/gtsam/nonlinear/GncOptimizer.h
+++ b/gtsam/nonlinear/GncOptimizer.h
@@ -34,21 +34,22 @@ namespace gtsam {
 /* ************************************************************************* */
 template<class GncParameters>
 class GncOptimizer {
-public:
-  /** For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer */
+ public:
+  /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
   typedef typename GncParameters::OptimizerType BaseOptimizer;
 
-private:
-  NonlinearFactorGraph nfg_;
-  Values state_;
-  GncParameters params_;
-  Vector weights_; // this could be a local variable in optimize, but it is useful to make it accessible from outside
+ private:
+  NonlinearFactorGraph nfg_;  ///< Original factor graph to be solved by GNC.
+  Values state_;  ///< Initial values to be used at each iteration by GNC.
+  GncParameters params_;  ///< GNC parameters.
+  Vector weights_;  ///< Weights associated with each factor in GNC (this could be a local variable in optimize, but it is useful to make it accessible from outside).
 
-public:
-  /// Constructor
+ public:
+  /// Constructor.
   GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
-      const GncParameters& params = GncParameters()) :
-      state_(initialValues), params_(params) {
+               const GncParameters& params = GncParameters())
+      : state_(initialValues),
+        params_(params) {
 
     // make sure all noiseModels are Gaussian or convert to Gaussian
     nfg_.resize(graph.size());
@@ -58,35 +59,39 @@ public:
     for (size_t i = 0; i < graph.size(); i++) {
       if (graph[i]) {
         NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
            NoiseModelFactor>(graph[i]);
         noiseModel::Robust::shared_ptr robust = boost::dynamic_pointer_cast<
            noiseModel::Robust>(factor->noiseModel());
-        if (robust) { // if the factor has a robust loss, we have to change it:
+        if (robust) {  // if the factor has a robust loss, we have to change it:
           SharedNoiseModel gaussianNoise = robust->noise();
-          NoiseModelFactor::shared_ptr gaussianFactor =
-              factor->cloneWithNewNoiseModel(gaussianNoise);
+          NoiseModelFactor::shared_ptr gaussianFactor = factor
+              ->cloneWithNewNoiseModel(gaussianNoise);
           nfg_[i] = gaussianFactor;
-        } else { // else we directly push it back
+        } else {  // else we directly push it back
           nfg_[i] = factor;
         }
       }
     }
   }
 
-  /// Access a copy of the internal factor graph
+  /// Access a copy of the internal factor graph.
   NonlinearFactorGraph getFactors() const {
     return NonlinearFactorGraph(nfg_);
   }
-  /// Access a copy of the internal values
+
+  /// Access a copy of the internal values.
   Values getState() const {
     return Values(state_);
   }
-  /// Access a copy of the parameters
+
+  /// Access a copy of the parameters.
   GncParameters getParams() const {
     return GncParameters(params_);
  }
-  /// Access a copy of the GNC weights
+
+  /// Access a copy of the GNC weights.
   Vector getWeights() const {
     return weights_;
   }
-  /// Compute optimal solution using graduated non-convexity
+
+  /// Compute optimal solution using graduated non-convexity.
   Values optimize() {
     // start by assuming all measurements are inliers
     weights_ = Vector::Ones(nfg_.size());
@@ -94,7 +99,7 @@ public:
     Values result = baseOptimizer.optimize();
     double mu = initializeMu();
     double prev_cost = nfg_.error(result);
-    double cost = 0.0; // this will be updated in the main loop
+    double cost = 0.0;  // this will be updated in the main loop
 
     // handle the degenerate case that corresponds to small
     // maximum residual errors at initialization
@@ -103,7 +108,8 @@ public:
     if (mu <= 0) {
       if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
         std::cout << "GNC Optimizer stopped because maximum residual at "
-            "initialization is small." << std::endl;
+                     "initialization is small."
+                  << std::endl;
       }
       if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
         result.print("result\n");
@@ -132,7 +138,9 @@ public:
 
       // stopping condition
       cost = graph_iter.error(result);
-      if (checkConvergence(mu, weights_, cost, prev_cost)) { break; }
+      if (checkConvergence(mu, weights_, cost, prev_cost)) {
+        break;
+      }
 
       // update mu
       mu = updateMu(mu);
@@ -157,7 +165,7 @@ public:
     return result;
   }
 
-  /// initialize the gnc parameter mu such that loss is approximately convex (remark 5 in GNC paper)
+  /// Initialize the gnc parameter mu such that loss is approximately convex (remark 5 in GNC paper).
   double initializeMu() const {
     // compute largest error across all factors
     double rmax_sq = 0.0;
@@ -168,75 +176,80 @@ public:
     }
     // set initial mu
     switch (params_.lossType) {
-    case GncParameters::GM:
-      // surrogate cost is convex for large mu
-      return 2 * rmax_sq / params_.barcSq; // initial mu
-    case GncParameters::TLS:
-      // initialize mu to the value specified in Remark 5 in GNC paper.
-      // surrogate cost is convex for mu close to zero
-      // degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop)
-      // according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq/ excessResidual
-      // however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop
-      return (2 * rmax_sq - params_.barcSq) > 0 ? params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1;
-    default:
-      throw std::runtime_error(
-          "GncOptimizer::initializeMu: called with unknown loss type.");
+      case GncParameters::GM:
+        // surrogate cost is convex for large mu
+        return 2 * rmax_sq / params_.barcSq;  // initial mu
+      case GncParameters::TLS:
+        /* initialize mu to the value specified in Remark 5 in GNC paper.
+         surrogate cost is convex for mu close to zero
+         degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop)
+         according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq / excessResidual
+         however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop
+         */
+        return
+            (2 * rmax_sq - params_.barcSq) > 0 ?
+                params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1;
+      default:
+        throw std::runtime_error(
+            "GncOptimizer::initializeMu: called with unknown loss type.");
     }
   }
 
-  /// update the gnc parameter mu to gradually increase nonconvexity
+  /// Update the gnc parameter mu to gradually increase nonconvexity.
   double updateMu(const double mu) const {
     switch (params_.lossType) {
-    case GncParameters::GM:
-      // reduce mu, but saturate at 1 (original cost is recovered for mu -> 1)
-      return std::max(1.0, mu / params_.muStep);
-    case GncParameters::TLS:
-      // increases mu at each iteration (original cost is recovered for mu -> inf)
-      return mu * params_.muStep;
-    default:
-      throw std::runtime_error(
-          "GncOptimizer::updateMu: called with unknown loss type.");
+      case GncParameters::GM:
+        // reduce mu, but saturate at 1 (original cost is recovered for mu -> 1)
+        return std::max(1.0, mu / params_.muStep);
+      case GncParameters::TLS:
+        // increases mu at each iteration (original cost is recovered for mu -> inf)
+        return mu * params_.muStep;
+      default:
+        throw std::runtime_error(
+            "GncOptimizer::updateMu: called with unknown loss type.");
     }
   }
 
-  /// check if we have reached the value of mu for which the surrogate loss matches the original loss
+  /// Check if we have reached the value of mu for which the surrogate loss matches the original loss.
   bool checkMuConvergence(const double mu) const {
     bool muConverged = false;
     switch (params_.lossType) {
-    case GncParameters::GM:
-      muConverged = std::fabs(mu - 1.0) < 1e-9; // mu=1 recovers the original GM function
-      break;
-    case GncParameters::TLS:
-      muConverged = false; // for TLS there is no stopping condition on mu (it must tend to infinity)
-      break;
-    default:
-      throw std::runtime_error(
-          "GncOptimizer::checkMuConvergence: called with unknown loss type.");
+      case GncParameters::GM:
+        muConverged = std::fabs(mu - 1.0) < 1e-9;  // mu=1 recovers the original GM function
+        break;
+      case GncParameters::TLS:
+        muConverged = false;  // for TLS there is no stopping condition on mu (it must tend to infinity)
+        break;
+      default:
+        throw std::runtime_error(
+            "GncOptimizer::checkMuConvergence: called with unknown loss type.");
     }
     if (muConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
       std::cout << "muConverged = true " << std::endl;
     return muConverged;
   }
 
-  /// check convergence of relative cost differences
+  /// Check convergence of relative cost differences.
   bool checkCostConvergence(const double cost, const double prev_cost) const {
-    bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost,1e-7) < params_.relativeCostTol;
+    bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
+        < params_.relativeCostTol;
     if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "checkCostConvergence = true " << std::endl;
     return costConverged;
   }
 
-  /// check convergence of weights to binary values
+  /// Check convergence of weights to binary values.
   bool checkWeightsConvergence(const Vector& weights) const {
-      bool weightsConverged = false;
-      switch (params_.lossType) {
+    bool weightsConverged = false;
+    switch (params_.lossType) {
       case GncParameters::GM:
-        weightsConverged = false; // for GM, there is no clear binary convergence for the weights
+        weightsConverged = false;  // for GM, there is no clear binary convergence for the weights
        break;
       case GncParameters::TLS:
         weightsConverged = true;
-        for(size_t i=0; i<weights.size(); i++){
-          if(std::fabs(weights[i]-std::round(weights[i])) > params_.weightsTol ){
+        for (size_t i = 0; i < weights.size(); i++) {
+          if (std::fabs(weights[i] - std::round(weights[i]))
+              > params_.weightsTol) {
            weightsConverged = false;
            break;
          }
@@ -245,23 +258,21 @@ public:
       default:
         throw std::runtime_error(
            "GncOptimizer::checkWeightsConvergence: called with unknown loss type.");
-  }
-  if (weightsConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
-    std::cout << "weightsConverged = true " << std::endl;
-  return weightsConverged;
    }
-
-  /// check for convergence between consecutive GNC iterations
-  bool checkConvergence(const double mu,
-      const Vector& weights,
-      const double cost,
-      const double prev_cost) const {
-    return checkCostConvergence(cost,prev_cost) ||
-        checkWeightsConvergence(weights) ||
-        checkMuConvergence(mu);
+    if (weightsConverged
+        && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
+      std::cout << "weightsConverged = true " << std::endl;
+    return weightsConverged;
   }
 
-  /// create a graph where each factor is weighted by the gnc weights
+  /// Check for convergence between consecutive GNC iterations.
+  bool checkConvergence(const double mu, const Vector& weights,
+                        const double cost, const double prev_cost) const {
+    return checkCostConvergence(cost, prev_cost)
+        || checkWeightsConvergence(weights) || checkMuConvergence(mu);
+  }
+
+  /// Create a graph where each factor is weighted by the gnc weights.
   NonlinearFactorGraph makeWeightedGraph(const Vector& weights) const {
     // make sure all noiseModels are Gaussian or convert to Gaussian
     NonlinearFactorGraph newGraph;
@@ -287,7 +298,7 @@ public:
     return newGraph;
   }
 
-  /// calculate gnc weights
+  /// Calculate gnc weights.
   Vector calculateWeights(const Values& currentEstimate, const double mu) {
     Vector weights = Vector::Ones(nfg_.size());
@@ -298,42 +309,43 @@ public:
     }
     std::vector<size_t> unknownWeights;
     std::set_difference(allWeights.begin(), allWeights.end(),
-        params_.knownInliers.begin(), params_.knownInliers.end(),
-        std::inserter(unknownWeights, unknownWeights.begin()));
+                        params_.knownInliers.begin(),
+                        params_.knownInliers.end(),
+                        std::inserter(unknownWeights, unknownWeights.begin()));
 
     // update weights of known inlier/outlier measurements
     switch (params_.lossType) {
-    case GncParameters::GM: { // use eq (12) in GNC paper
-      for (size_t k : unknownWeights) {
-        if (nfg_[k]) {
-          double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
-          weights[k] = std::pow(
-              (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
-        }
-      }
-      return weights;
-    }
-    case GncParameters::TLS: { // use eq (14) in GNC paper
-      double upperbound = (mu + 1) / mu * params_.barcSq;
-      double lowerbound = mu / (mu + 1) * params_.barcSq;
-      for (size_t k : unknownWeights) {
-        if (nfg_[k]) {
-          double u2_k = nfg_[k]->error(
-              currentEstimate); // squared (and whitened) residual
-          if (u2_k >= upperbound) {
-            weights[k] = 0;
-          } else if (u2_k <= lowerbound) {
-            weights[k] = 1;
-          } else {
-            weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k) - mu;
+      case GncParameters::GM: {  // use eq (12) in GNC paper
+        for (size_t k : unknownWeights) {
+          if (nfg_[k]) {
+            double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
+            weights[k] = std::pow(
+                (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
          }
        }
+        return weights;
      }
-      return weights;
-    }
-    default:
-      throw std::runtime_error(
-          "GncOptimizer::calculateWeights: called with unknown loss type.");
+      case GncParameters::TLS: {  // use eq (14) in GNC paper
+        double upperbound = (mu + 1) / mu * params_.barcSq;
+        double lowerbound = mu / (mu + 1) * params_.barcSq;
+        for (size_t k : unknownWeights) {
+          if (nfg_[k]) {
+            double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
+            if (u2_k >= upperbound) {
+              weights[k] = 0;
+            } else if (u2_k <= lowerbound) {
+              weights[k] = 1;
+            } else {
+              weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k)
+                  - mu;
+            }
+          }
+        }
+        return weights;
+      }
+      default:
+        throw std::runtime_error(
+            "GncOptimizer::calculateWeights: called with unknown loss type.");
     }
   }
 };
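Editor's note: the GM and TLS branches of calculateWeights() above implement eqs. (12) and (14) of the GNC paper. A minimal standalone sketch of the two update rules, outside any GTSAM types (the function names gmWeight/tlsWeight are illustrative, not part of the library):

    #include <cmath>

    // u2 is the squared whitened residual u_k^2, barcSq the inlier threshold,
    // and mu the GNC control parameter, exactly as in calculateWeights() above.

    // Geman-McClure surrogate, eq. (12): w_k = (mu*barcSq / (u_k^2 + mu*barcSq))^2.
    double gmWeight(double u2, double mu, double barcSq) {
      double w = (mu * barcSq) / (u2 + mu * barcSq);
      return w * w;
    }

    // Truncated least squares surrogate, eq. (14): 1 below the lower bound,
    // 0 above the upper bound, sqrt(barcSq*mu*(mu+1)/u_k^2) - mu in between.
    double tlsWeight(double u2, double mu, double barcSq) {
      double upperbound = (mu + 1) / mu * barcSq;  // above this: certain outlier
      double lowerbound = mu / (mu + 1) * barcSq;  // below this: certain inlier
      if (u2 >= upperbound) return 0.0;
      if (u2 <= lowerbound) return 1.0;
      return std::sqrt(barcSq * mu * (mu + 1) / u2) - mu;
    }

For example, with a residual exactly at the threshold (u2 = barcSq = 0.5) and a very large mu = 1e6, tlsWeight lands in the transition region and returns approximately 0.5, which is what the calculateWeightsTLS2 test below checks.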
diff --git a/gtsam/nonlinear/GncParams.h b/gtsam/nonlinear/GncParams.h
index 30ceef4c7..9c4f21b81 100644
--- a/gtsam/nonlinear/GncParams.h
+++ b/gtsam/nonlinear/GncParams.h
@@ -34,47 +34,50 @@ namespace gtsam {
 /* ************************************************************************* */
 template<class BaseOptimizerParameters>
 class GncParams {
-public:
-  /** For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer */
+ public:
+  /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
   typedef typename BaseOptimizerParameters::OptimizerType OptimizerType;
 
-  /** Verbosity levels */
+  /// Verbosity levels
   enum Verbosity {
-    SILENT = 0, SUMMARY, VALUES
+    SILENT = 0,
+    SUMMARY,
+    VALUES
   };
 
-  /** Choice of robust loss function for GNC */
+  /// Choice of robust loss function for GNC.
   enum GncLossType {
-    GM /*Geman McClure*/, TLS /*Truncated least squares*/
+    GM /*Geman McClure*/,
+    TLS /*Truncated least squares*/
   };
 
-  /// Constructor
-  GncParams(const BaseOptimizerParameters& baseOptimizerParams) :
-      baseOptimizerParams(baseOptimizerParams) {
+  /// Constructor.
+  GncParams(const BaseOptimizerParameters& baseOptimizerParams)
+      : baseOptimizerParams(baseOptimizerParams) {
   }
 
-  /// Default constructor
-  GncParams() :
-      baseOptimizerParams() {
+  /// Default constructor.
+  GncParams()
+      : baseOptimizerParams() {
   }
 
-  /// GNC parameters
-  BaseOptimizerParameters baseOptimizerParams; /*optimization parameters used to solve the weighted least squares problem at each GNC iteration*/
+  /// GNC parameters.
+  BaseOptimizerParameters baseOptimizerParams;  ///< Optimization parameters used to solve the weighted least squares problem at each GNC iteration
   /// any other specific GNC parameters:
-  GncLossType lossType = TLS; /* default loss*/
-  size_t maxIterations = 100; /* maximum number of iterations*/
-  double barcSq = 1.0; /* a factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance*/
-  double muStep = 1.4; /* multiplicative factor to reduce/increase the mu in gnc */
-  double relativeCostTol = 1e-5; ///< if relative cost change is below this threshold, stop iterating
-  double weightsTol = 1e-4; ///< if the weights are within weightsTol from being binary, stop iterating (only for TLS)
-  Verbosity verbosity = SILENT; /* verbosity level */
-  std::vector<size_t> knownInliers = std::vector<size_t>(); /* slots in the factor graph corresponding to measurements that we know are inliers */
+  GncLossType lossType = TLS;  ///< Default loss
+  size_t maxIterations = 100;  ///< Maximum number of iterations
+  double barcSq = 1.0;  ///< A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance
+  double muStep = 1.4;  ///< Multiplicative factor to reduce/increase the mu in gnc
+  double relativeCostTol = 1e-5;  ///< If relative cost change is below this threshold, stop iterating
+  double weightsTol = 1e-4;  ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
+  Verbosity verbosity = SILENT;  ///< Verbosity level
+  std::vector<size_t> knownInliers = std::vector<size_t>();  ///< Slots in the factor graph corresponding to measurements that we know are inliers
 
-  /// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType)
+  /// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
   void setLossType(const GncLossType type) {
     lossType = type;
   }
 
-  /// Set the maximum number of iterations in GNC (changing the max nr of iters might lead to less accurate solutions and is not recommended)
+  /// Set the maximum number of iterations in GNC (changing the max nr of iters might lead to less accurate solutions and is not recommended).
   void setMaxIterations(const size_t maxIter) {
     std::cout
        << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
@@ -85,22 +88,24 @@ public:
   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
   * Assuming an isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq.
-  * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2
+  * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
   * */
   void setInlierCostThreshold(const double inth) {
     barcSq = inth;
   }
 
-  /// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep
+  /// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep.
   void setMuStep(const double step) {
     muStep = step;
   }
 
-  /// Set the maximum relative difference in mu values to stop iterating
-  void setRelativeCostTol(double value) { relativeCostTol = value;
+  /// Set the maximum relative difference in mu values to stop iterating.
+  void setRelativeCostTol(double value) {
+    relativeCostTol = value;
   }
 
-  /// Set the maximum difference between the weights and their rounding in {0,1} to stop iterating
-  void setWeightsTol(double value) { weightsTol = value;
+  /// Set the maximum difference between the weights and their rounding in {0,1} to stop iterating.
+  void setWeightsTol(double value) {
+    weightsTol = value;
   }
 
-  /// Set the verbosity level
+  /// Set the verbosity level.
   void setVerbosityGNC(const Verbosity value) {
     verbosity = value;
   }
@@ -108,33 +113,32 @@ public:
   * corresponds to the slots in the factor graph. For instance, if you have a nonlinear factor graph nfg,
   * and you provide knownIn = {0, 2, 15}, GNC will not apply outlier rejection to nfg[0], nfg[2], and nfg[15].
   * This functionality is commonly used in SLAM when one may assume the odometry is outlier free, and
-  * only apply GNC to prune outliers from the loop closures
+  * only apply GNC to prune outliers from the loop closures.
   * */
   void setKnownInliers(const std::vector<size_t>& knownIn) {
     for (size_t i = 0; i < knownIn.size(); i++)
      knownInliers.push_back(knownIn[i]);
   }
 
-  /// equals
+  /// Equals.
   bool equals(const GncParams& other, double tol = 1e-9) const {
     return baseOptimizerParams.equals(other.baseOptimizerParams)
         && lossType == other.lossType && maxIterations == other.maxIterations
         && std::fabs(barcSq - other.barcSq) <= tol
         && std::fabs(muStep - other.muStep) <= tol
-        && verbosity == other.verbosity
-        && knownInliers == other.knownInliers;
+        && verbosity == other.verbosity && knownInliers == other.knownInliers;
   }
 
-  /// print function
+  /// Print.
   void print(const std::string& str) const {
     std::cout << str << "\n";
     switch (lossType) {
-    case GM:
-      std::cout << "lossType: Geman McClure" << "\n";
-      break;
-    case TLS:
-      std::cout << "lossType: Truncated Least-squares" << "\n";
-      break;
-    default:
-      throw std::runtime_error("GncParams::print: unknown loss type.");
+      case GM:
+        std::cout << "lossType: Geman McClure" << "\n";
+        break;
+      case TLS:
+        std::cout << "lossType: Truncated Least-squares" << "\n";
+        break;
+      default:
+        throw std::runtime_error("GncParams::print: unknown loss type.");
     }
     std::cout << "maxIterations: " << maxIterations << "\n";
     std::cout << "barcSq: " << barcSq << "\n";
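Editor's note: putting the setters above together, a typical configuration reads as follows; this is a sketch assuming the usual GTSAM includes, with GaussNewtonParams as the base optimizer (the same choice the tests below make). The numeric values shown are the documented defaults, except the verbosity level:

    GaussNewtonParams baseParams;
    GncParams<GaussNewtonParams> gncParams(baseParams);
    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
    gncParams.setInlierCostThreshold(1.0);  // factor.error() < barcSq => inlier
    gncParams.setMuStep(1.4);               // mu <- mu * muStep at each GNC iteration
    gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);

SUMMARY is shown instead of the SILENT default because the convergence checks in GncOptimizer print their "muConverged"/"costConverged"/"weightsConverged" lines only at that level or higher.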
diff --git a/tests/testGncOptimizer.cpp b/tests/testGncOptimizer.cpp
index 15a83eb4b..f46563b91 100644
--- a/tests/testGncOptimizer.cpp
+++ b/tests/testGncOptimizer.cpp
@@ -73,16 +73,16 @@ TEST(GncOptimizer, gncParamsConstructor) {
 /* ************************************************************************* */
 TEST(GncOptimizer, gncConstructor) {
   // has to have Gaussian noise models !
-  auto fg = example::createReallyNonlinearFactorGraph(); // just a unary factor
-                                                         // on a 2D point
+  auto fg = example::createReallyNonlinearFactorGraph();  // just a unary factor
+                                                          // on a 2D point
 
   Point2 p0(3, 3);
   Values initial;
   initial.insert(X(1), p0);
 
   GncParams<GaussNewtonParams> gncParams;
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                        gncParams);
 
   CHECK(gnc.getFactors().equals(fg));
   CHECK(gnc.getState().equals(initial));
@@ -100,8 +100,9 @@ TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
   initial.insert(X(1), p0);
 
   GncParams<GaussNewtonParams> gncParams;
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(
-      fg_robust, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg_robust,
+                                                        initial,
+                                                        gncParams);
 
   // make sure that, when parsing, the graph is transformed into one without
   // robust loss
@@ -118,19 +119,17 @@ TEST(GncOptimizer, initializeMu) {
 
   // testing GM mu initialization
   GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::GM);
-  auto gnc_gm =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+  auto gnc_gm = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                           gncParams);
   // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq
   // (barcSq=1 in this example)
   EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
 
   // testing TLS mu initialization
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  auto gnc_tls =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  auto gnc_tls = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                            gncParams);
   // according to rmk 5 in the gnc paper: m0 = barcSq / (2 * rmax^2 - barcSq)
   // (barcSq=1 in this example)
   EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
@@ -146,11 +145,10 @@ TEST(GncOptimizer, updateMuGM) {
   initial.insert(X(1), p0);
 
   GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::GM);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
   gncParams.setMuStep(1.4);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                        gncParams);
 
   double mu = 5.0;
   EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
@@ -171,10 +169,9 @@ TEST(GncOptimizer, updateMuTLS) {
 
   GncParams<GaussNewtonParams> gncParams;
   gncParams.setMuStep(1.4);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                        gncParams);
 
   double mu = 5.0;
   EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu * 1.4, tol);
@@ -190,24 +187,23 @@ TEST(GncOptimizer, checkMuConvergence) {
   initial.insert(X(1), p0);
 
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::GM);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  double mu = 1.0;
-  CHECK(gnc.checkMuConvergence(mu));
+    double mu = 1.0;
+    CHECK(gnc.checkMuConvergence(mu));
   }
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setLossType(
+        GncParams<GaussNewtonParams>::GncLossType::TLS);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  double mu = 1.0;
-  CHECK(!gnc.checkMuConvergence(mu)); //always false for TLS
+    double mu = 1.0;
+    CHECK(!gnc.checkMuConvergence(mu));  // always false for TLS
   }
 }
 
@@ -221,26 +217,26 @@ TEST(GncOptimizer, checkCostConvergence) {
   initial.insert(X(1), p0);
 
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setRelativeCostTol(0.49);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setRelativeCostTol(0.49);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  double prev_cost = 1.0;
-  double cost = 0.5;
-  // relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false
-  CHECK(!gnc.checkCostConvergence(cost, prev_cost));
+    double prev_cost = 1.0;
+    double cost = 0.5;
+    // relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false
+    CHECK(!gnc.checkCostConvergence(cost, prev_cost));
   }
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setRelativeCostTol(0.51);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setRelativeCostTol(0.51);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  double prev_cost = 1.0;
-  double cost = 0.5;
-  // relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true
-  CHECK(gnc.checkCostConvergence(cost, prev_cost));
+    double prev_cost = 1.0;
+    double cost = 0.5;
+    // relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true
+    CHECK(gnc.checkCostConvergence(cost, prev_cost));
   }
 }
 
@@ -254,48 +250,47 @@ TEST(GncOptimizer, checkWeightsConvergence) {
   initial.insert(X(1), p0);
 
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::GM);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Vector weights = Vector::Ones(fg.size());
-  CHECK(!gnc.checkWeightsConvergence(weights)); //always false for GM
+    Vector weights = Vector::Ones(fg.size());
+    CHECK(!gnc.checkWeightsConvergence(weights));  // always false for GM
   }
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setLossType(
+        GncParams<GaussNewtonParams>::GncLossType::TLS);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Vector weights = Vector::Ones(fg.size());
-  // weights are binary, so checkWeightsConvergence = true
-  CHECK(gnc.checkWeightsConvergence(weights));
+    Vector weights = Vector::Ones(fg.size());
+    // weights are binary, so checkWeightsConvergence = true
+    CHECK(gnc.checkWeightsConvergence(weights));
   }
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setLossType(
+        GncParams<GaussNewtonParams>::GncLossType::TLS);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Vector weights = Vector::Ones(fg.size());
-  weights[0] = 0.9; // more than weightsTol = 1e-4 from 1, hence checkWeightsConvergence = false
-  CHECK(!gnc.checkWeightsConvergence(weights));
+    Vector weights = Vector::Ones(fg.size());
+    weights[0] = 0.9;  // more than weightsTol = 1e-4 from 1, hence checkWeightsConvergence = false
+    CHECK(!gnc.checkWeightsConvergence(weights));
   }
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  gncParams.setWeightsTol(0.1);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setLossType(
+        GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setWeightsTol(0.1);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Vector weights = Vector::Ones(fg.size());
-  weights[0] = 0.9; // exactly weightsTol = 0.1 from 1, hence checkWeightsConvergence = true
-  CHECK(gnc.checkWeightsConvergence(weights));
+    Vector weights = Vector::Ones(fg.size());
+    weights[0] = 0.9;  // exactly weightsTol = 0.1 from 1, hence checkWeightsConvergence = true
+    CHECK(gnc.checkWeightsConvergence(weights));
   }
 }
 
@@ -310,10 +305,9 @@ TEST(GncOptimizer, checkConvergenceTLS) {
 
   GncParams<GaussNewtonParams> gncParams;
   gncParams.setRelativeCostTol(1e-5);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                        gncParams);
 
   CHECK(gnc.checkCostConvergence(1.0, 1.0));
   CHECK(!gnc.checkCostConvergence(1.0, 2.0));
@@ -333,12 +327,11 @@ TEST(GncOptimizer, calculateWeightsGM) {
   weights_expected[0] = 1.0; // zero error
   weights_expected[1] = 1.0; // zero error
   weights_expected[2] = 1.0; // zero error
-  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50
+  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2);  // outlier, error = 50
 
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::GM);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   double mu = 1.0;
   Vector weights_actual = gnc.calculateWeights(initial, mu);
@@ -346,11 +339,10 @@ TEST(GncOptimizer, calculateWeightsGM) {
 
   mu = 2.0;
   double barcSq = 5.0;
-  weights_expected[3] =
-      std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
+  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2);  // outlier, error = 50
   gncParams.setInlierCostThreshold(barcSq);
-  auto gnc2 =
-      GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                         gncParams);
   weights_actual = gnc2.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
 }
@@ -372,8 +364,7 @@ TEST(GncOptimizer, calculateWeightsTLS) {
 
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
+  gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   double mu = 1.0;
   Vector weights_actual = gnc.calculateWeights(initial, mu);
@@ -391,45 +382,44 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
 
   // create very simple factor graph with a single factor 0.5 * 1/sigma^2 * || x - [1;0] ||^2
   double sigma = 1;
-  SharedDiagonal noise =
-      noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma));
+  SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma));
   NonlinearFactorGraph nfg;
-  nfg.add(PriorFactor<Point2>(X(1),x_prior,noise));
+  nfg.add(PriorFactor<Point2>(X(1), x_prior, noise));
 
   // cost of the factor:
-  DOUBLES_EQUAL(0.5 * 1/(sigma*sigma), nfg.error(initial), tol);
+  DOUBLES_EQUAL(0.5 * 1 / (sigma * sigma), nfg.error(initial), tol);
 
   // check the TLS weights are correct: CASE 1: residual below barcsq
   {
-  // expected:
-  Vector weights_expected = Vector::Zero(1);
-  weights_expected[0] = 1.0; // inlier
-  // actual:
-  GaussNewtonParams gnParams;
-  GncParams<GaussNewtonParams> gncParams(gnParams);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  gncParams.setInlierCostThreshold(0.51); // if inlier threshold is slightly larger than 0.5, then measurement is inlier
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial, gncParams);
-  double mu = 1e6;
-  Vector weights_actual = gnc.calculateWeights(initial, mu);
-  CHECK(assert_equal(weights_expected, weights_actual, tol));
+    // expected:
+    Vector weights_expected = Vector::Zero(1);
+    weights_expected[0] = 1.0;  // inlier
+    // actual:
+    GaussNewtonParams gnParams;
+    GncParams<GaussNewtonParams> gncParams(gnParams);
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setInlierCostThreshold(0.51);  // if inlier threshold is slightly larger than 0.5, then measurement is inlier
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
+                                                          gncParams);
+    double mu = 1e6;
+    Vector weights_actual = gnc.calculateWeights(initial, mu);
+    CHECK(assert_equal(weights_expected, weights_actual, tol));
   }
 
   // check the TLS weights are correct: CASE 2: residual above barcsq
   {
-  // expected:
-  Vector weights_expected = Vector::Zero(1);
-  weights_expected[0] = 0.0; // outlier
-  // actual:
-  GaussNewtonParams gnParams;
-  GncParams<GaussNewtonParams> gncParams(gnParams);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  gncParams.setInlierCostThreshold(0.49); // if inlier threshold is slightly below 0.5, then measurement is outlier
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial, gncParams);
-  double mu = 1e6; // very large mu recovers original TLS cost
-  Vector weights_actual = gnc.calculateWeights(initial, mu);
-  CHECK(assert_equal(weights_expected, weights_actual, tol));
+    // expected:
+    Vector weights_expected = Vector::Zero(1);
+    weights_expected[0] = 0.0;  // outlier
+    // actual:
+    GaussNewtonParams gnParams;
+    GncParams<GaussNewtonParams> gncParams(gnParams);
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setInlierCostThreshold(0.49);  // if inlier threshold is slightly below 0.5, then measurement is outlier
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
+                                                          gncParams);
+    double mu = 1e6;  // very large mu recovers original TLS cost
+    Vector weights_actual = gnc.calculateWeights(initial, mu);
+    CHECK(assert_equal(weights_expected, weights_actual, tol));
   }
 
   // check the TLS weights are correct: CASE 3: residual at barcsq
   {
@@ -439,11 +429,11 @@
     // actual:
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
-    gncParams.setLossType(
-        GncParams<GaussNewtonParams>::GncLossType::TLS);
-    gncParams.setInlierCostThreshold(0.5); // if inlier threshold is slightly below 0.5, then measurement is outlier
-    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial, gncParams);
-    double mu = 1e6; // very large mu recovers original TLS cost
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    gncParams.setInlierCostThreshold(0.5);  // inlier threshold exactly at the residual: the weight lands in the TLS transition region
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
+                                                          gncParams);
+    double mu = 1e6;  // very large mu recovers original TLS cost
     Vector weights_actual = gnc.calculateWeights(initial, mu);
     CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
   }
 }
 
@@ -453,17 +443,16 @@ TEST(GncOptimizer, makeWeightedGraph) {
   // create original factor
   double sigma1 = 0.1;
-  NonlinearFactorGraph nfg =
-      example::nonlinearFactorGraphWithGivenSigma(sigma1);
+  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
+      sigma1);
 
   // create expected
   double sigma2 = 10;
-  NonlinearFactorGraph expected =
-      example::nonlinearFactorGraphWithGivenSigma(sigma2);
+  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
+      sigma2);
 
   // create weights
-  Vector weights = Vector::Ones(
-      1); // original info:1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10-4
+  Vector weights = Vector::Ones(1);  // original info: 1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 1e-4
   weights[0] = 1e-4;
 
   // create actual
@@ -491,8 +480,8 @@ TEST(GncOptimizer, optimizeSimple) {
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc =
-      GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+                                                               gncParams);
 
   Values actual = gnc.optimize();
   DOUBLES_EQUAL(0, fg.error(actual), tol);
@@ -515,15 +504,13 @@ TEST(GncOptimizer, optimize) {
   CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
 
   // try with robust loss function and standard GN
-  auto fg_robust =
-      example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with
-                                                      // factors wrapped in
-                                                      // Geman McClure losses
+  auto fg_robust = example::sharedRobustFactorGraphWithOutliers();  // same as fg, but with
+                                                                    // factors wrapped in
+                                                                    // Geman McClure losses
   GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
   Values gn2_results = gn2.optimize();
   // converges to incorrect point, this time due to the nonconvexity of the loss
-  CHECK(
-      assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
 
   // .. but graduated nonconvexity ensures both robustness and convergence in
   // the face of nonconvexity
@@ -549,59 +536,59 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   // nonconvexity with known inliers
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setKnownInliers(knownInliers);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::GM);
-  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::GM);
+    //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 
-  // check weights were actually fixed:
-  Vector finalWeights = gnc.getWeights();
-  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
   }
   {
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setKnownInliers(knownInliers);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 
-  // check weights were actually fixed:
-  Vector finalWeights = gnc.getWeights();
-  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
-  DOUBLES_EQUAL(0.0, finalWeights[3], tol);
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    DOUBLES_EQUAL(0.0, finalWeights[3], tol);
   }
   {
-  // if we set the threshold large, they are all inliers
-  GncParams<GaussNewtonParams> gncParams;
-  gncParams.setKnownInliers(knownInliers);
-  gncParams.setLossType(
-      GncParams<GaussNewtonParams>::GncLossType::TLS);
-  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
-  gncParams.setInlierCostThreshold( 100.0 );
-  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+    // if we set the threshold large, they are all inliers
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);
+    //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
+    gncParams.setInlierCostThreshold(100.0);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+                                                          gncParams);
 
-  Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 
-  // check weights were actually fixed:
-  Vector finalWeights = gnc.getWeights();
-  DOUBLES_EQUAL(1.0, finalWeights[0], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[1], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[2], tol);
-  DOUBLES_EQUAL(1.0, finalWeights[3], tol);
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[3], tol);
   }
 }
 
@@ -613,24 +600,22 @@ TEST(GncOptimizer, optimizeSmallPoseGraph) {
   Values::shared_ptr initial;
   boost::tie(graph, initial) = load2D(filename);
   // Add a Gaussian prior on first poses
-  Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin
-  SharedDiagonal priorNoise =
-      noiseModel::Diagonal::Sigmas(Vector3(0.01, 0.01, 0.01));
+  Pose2 priorMean(0.0, 0.0, 0.0);  // prior at origin
+  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
+      Vector3(0.01, 0.01, 0.01));
   graph->addPrior(0, priorMean, priorNoise);
 
   /// get expected values by optimizing outlier-free graph
   Values expected = LevenbergMarquardtOptimizer(*graph, *initial).optimize();
 
   // add a few outliers
-  SharedDiagonal betweenNoise =
-      noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.01));
-  graph->push_back(BetweenFactor<Pose2>(
-      90, 50, Pose2(),
-      betweenNoise)); // some arbitrary and incorrect between factor
+  SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas(
+      Vector3(0.1, 0.1, 0.01));
+  graph->push_back(BetweenFactor<Pose2>(90, 50, Pose2(), betweenNoise));  // some arbitrary and incorrect between factor
 
   /// get expected values by optimizing the graph that now contains an outlier
-  Values expectedWithOutliers =
-      LevenbergMarquardtOptimizer(*graph, *initial).optimize();
+  Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial)
+      .optimize();
   // as expected, the following test fails due to the presence of an outlier!
   // CHECK(assert_equal(expected, expectedWithOutliers, 1e-3));
@@ -639,13 +624,12 @@
   // inliers, but this problem is simple enough to succeed even without that
   // assumption
   std::vector<size_t> knownInliers;
   GncParams<GaussNewtonParams> gncParams;
-  auto gnc =
-      GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial,
+                                                        gncParams);
   Values actual = gnc.optimize();
 
   // compare
-  CHECK(
-      assert_equal(expected, actual, 1e-3)); // yay! we are robust to outliers!
+  CHECK(assert_equal(expected, actual, 1e-3));  // yay! we are robust to outliers!
 }
 
 /* ************************************************************************* */
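Editor's note: an end-to-end usage sketch in the style of optimizeWithKnownInliers above. The graph helper name and the initial guess are assumptions for illustration; any NonlinearFactorGraph whose factors carry Gaussian (or robust-wrapped) noise models works the same way:

    // assumed: a test helper returning four factors, the last one an outlier
    auto fg = example::sharedNonRobustFactorGraphWithOutliers();  // hypothetical helper
    Values initial;
    initial.insert(X(1), Point2(3, 3));  // illustrative initial guess

    std::vector<size_t> knownInliers{0, 1, 2};  // slots exempt from outlier rejection

    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncParams<GaussNewtonParams>::GncLossType::TLS);

    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
    Values estimate = gnc.optimize();
    Vector finalWeights = gnc.getWeights();  // ~1 for inliers, ~0 for rejected outliers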