Changed barcSq to be a vector, so that the user can provide a bound for each factor.

parent d267368b28
commit be86b9b5d7
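For orientation, a minimal usage sketch of the API this commit introduces (not part of the diff below; the include paths and the GaussNewtonParams choice mirror the updated unit tests, and graph/initial are placeholders):

```cpp
// Sketch only: exercises the setInlierCostThresholds() overloads added in this
// commit. Include paths and the helper function name are assumptions.
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/GncOptimizer.h>

using namespace gtsam;

Values robustSolve(const NonlinearFactorGraph& graph, const Values& initial) {
  GncParams<GaussNewtonParams> gncParams;
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(graph, initial,
                                                        gncParams);

  // One threshold shared by all factors (replaces GncParams::setInlierCostThreshold).
  gnc.setInlierCostThresholds(1.0);

  // Or one threshold per factor, e.g. a looser bound for the last factor.
  Vector thresholds = Vector::Ones(graph.size());
  thresholds(graph.size() - 1) = 100.0;
  gnc.setInlierCostThresholds(thresholds);

  return gnc.optimize();
}
```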
					
@@ -43,6 +43,7 @@ class GncOptimizer {
   Values state_; ///< Initial values to be used at each iteration by GNC.
   GncParameters params_; ///< GNC parameters.
   Vector weights_;  ///< Weights associated to each factor in GNC (this could be a local variable in optimize, but it is useful to make it accessible from outside).
+  Vector barcSq_;  ///< Inlier thresholds. A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance. Also note the code allows a threshold for each factor.
 
  public:
   /// Constructor.
@@ -53,6 +54,7 @@ class GncOptimizer {
 
     // make sure all noiseModels are Gaussian or convert to Gaussian
     nfg_.resize(graph.size());
+    barcSq_ = Vector::Ones(graph.size());
     for (size_t i = 0; i < graph.size(); i++) {
       if (graph[i]) {
         NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
@@ -65,6 +67,26 @@ class GncOptimizer {
     }
   }
 
+  /** Set the maximum weighted residual error for an inlier (same for all factors). For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
+   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
+   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
+   * Assuming an isotropic measurement covariance sigma^2 * Identity, the condition becomes: 0.5 * 1/sigma^2 * || r(x) ||^2 <= barcSq,
+   * hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
+   */
+  void setInlierCostThresholds(const double inth) {
+    barcSq_ = inth * Vector::Ones(nfg_.size());
+  }
+
+  /** Set the maximum weighted residual error for an inlier (one threshold per factor). For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
+   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
+   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
+   * Assuming an isotropic measurement covariance sigma^2 * Identity, the condition becomes: 0.5 * 1/sigma^2 * || r(x) ||^2 <= barcSq,
+   * hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
+   */
+  void setInlierCostThresholds(const Vector& inthVec) {
+    barcSq_ = inthVec;
+  }
+
   /// Access a copy of the internal factor graph.
   const NonlinearFactorGraph& getFactors() const { return nfg_; }
 
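As a quick restatement of the doc comment above in display math (same convention, nothing new):

```latex
% Inlier condition used by setInlierCostThresholds, with barcSq written as \bar{c}^2:
f(x) = \tfrac{1}{2}\,\lVert r(x)\rVert^{2}_{\Omega} \le \bar{c}^{2}.
% With isotropic covariance \Sigma = \sigma^2 I, i.e. \Omega = \sigma^{-2} I:
\tfrac{1}{2\sigma^{2}}\,\lVert r(x)\rVert^{2} \le \bar{c}^{2}
\;\Longleftrightarrow\;
\lVert r(x)\rVert^{2} \le 2\,\bar{c}^{2}\sigma^{2}.
```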
@@ -77,6 +99,17 @@ class GncOptimizer {
   /// Access a copy of the GNC weights.
   const Vector& getWeights() const { return weights_; }
 
+  /// Access a copy of the inlier cost thresholds.
+  const Vector& getInlierCostThresholds() const { return barcSq_; }
+
+  /// Equals.
+  bool equals(const GncOptimizer& other, double tol = 1e-9) const {
+    return nfg_.equals(other.getFactors())
+        && equal(weights_, other.getWeights())
+        && params_.equals(other.getParams())
+        && equal(barcSq_, other.getInlierCostThresholds());
+  }
+
   /// Compute optimal solution using graduated non-convexity.
   Values optimize() {
     // start by assuming all measurements are inliers
@@ -153,18 +186,18 @@ class GncOptimizer {
 
   /// Initialize the gnc parameter mu such that loss is approximately convex (remark 5 in GNC paper).
   double initializeMu() const {
-    // compute largest error across all factors
-    double rmax_sq = 0.0;
-    for (size_t i = 0; i < nfg_.size(); i++) {
-      if (nfg_[i]) {
-        rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_));
-      }
-    }
+
+    double mu_init = 0.0;
     // set initial mu
     switch (params_.lossType) {
       case GncLossType::GM:
-        // surrogate cost is convex for large mu
-        return 2 * rmax_sq / params_.barcSq;  // initial mu
+        // surrogate cost is convex for large mu. initialize as in remark 5 in GNC paper
+        for (size_t k = 0; k < nfg_.size(); k++) {
+          if (nfg_[k]) {
+            mu_init = std::max(mu_init, 2 * nfg_[k]->error(state_) / barcSq_[k]);
+          }
+        }
+        return mu_init;  // initial mu
       case GncLossType::TLS:
         /* initialize mu to the value specified in Remark 5 in GNC paper.
          surrogate cost is convex for mu close to zero
@@ -172,9 +205,15 @@ class GncOptimizer {
          according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq/ excessResidual
          however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop
          */
-        return
-            (2 * rmax_sq - params_.barcSq) > 0 ?
-                params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1;
+        mu_init = std::numeric_limits<double>::infinity();
+        for (size_t k = 0; k < nfg_.size(); k++) {
+          if (nfg_[k]) {
+            double rk = nfg_[k]->error(state_);
+            mu_init = (2 * rk - barcSq_[k]) > 0 ?  // if positive, update mu, otherwise keep same
+                std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k])) : mu_init;
+          }
+        }
+        return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1;
       default:
         throw std::runtime_error(
             "GncOptimizer::initializeMu: called with unknown loss type.");
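Written out per factor (with r̄²_k = nfg_[k]->error(state_) and c̄²_k = barcSq_[k]), the initialization above is just Remark 5 of the GNC paper applied factor-wise; this block is a reading aid, not part of the diff:

```latex
\mu_0^{\mathrm{GM}}  = \max_{k}\,\frac{2\,\bar r_k^{2}}{\bar c_k^{2}},
\qquad
\mu_0^{\mathrm{TLS}} = \min_{k:\;2\bar r_k^{2} > \bar c_k^{2}}\,
                       \frac{\bar c_k^{2}}{2\,\bar r_k^{2} - \bar c_k^{2}},
% with \mu_0^{TLS} = -1 (which terminates the GNC loop) if no factor satisfies 2\bar r_k^2 > \bar c_k^2.
```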
@@ -305,14 +344,14 @@ class GncOptimizer {
           if (nfg_[k]) {
             double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
             weights[k] = std::pow(
-                (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
+                (mu * barcSq_[k]) / (u2_k + mu * barcSq_[k]), 2);
           }
         }
         return weights;
       }
       case GncLossType::TLS: {  // use eq (14) in GNC paper
-        double upperbound = (mu + 1) / mu * params_.barcSq;
-        double lowerbound = mu / (mu + 1) * params_.barcSq;
+        double upperbound = (mu + 1) / mu * barcSq_.maxCoeff();
+        double lowerbound = mu / (mu + 1) * barcSq_.minCoeff();
         for (size_t k : unknownWeights) {
           if (nfg_[k]) {
             double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
@@ -321,7 +360,7 @@ class GncOptimizer {
             } else if (u2_k <= lowerbound) {
               weights[k] = 1;
             } else {
-              weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k)
+              weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k)
                   - mu;
             }
           }
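For reference, the per-factor weight updates these two hunks implement (eqs. (12) and (14) in the GNC paper), with u²_k the whitened squared residual; note that the TLS saturation bounds in the code above are evaluated with barcSq_.maxCoeff() / barcSq_.minCoeff() rather than per factor:

```latex
w_k^{\mathrm{GM}} = \left(\frac{\mu\,\bar c_k^{2}}{u_k^{2} + \mu\,\bar c_k^{2}}\right)^{2},
\qquad
w_k^{\mathrm{TLS}} =
\begin{cases}
0, & u_k^{2} \ge \frac{\mu+1}{\mu}\,\bar c_k^{2},\\
1, & u_k^{2} \le \frac{\mu}{\mu+1}\,\bar c_k^{2},\\
\sqrt{\bar c_k^{2}\,\mu(\mu+1)/u_k^{2}} - \mu, & \text{otherwise.}
\end{cases}
```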
@@ -66,7 +66,6 @@ class GncParams {
   /// any other specific GNC parameters:
   GncLossType lossType = TLS;  ///< Default loss
   size_t maxIterations = 100;  ///< Maximum number of iterations
-  double barcSq = 1.0;  ///< A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance
   double muStep = 1.4;  ///< Multiplicative factor to reduce/increase the mu in gnc
   double relativeCostTol = 1e-5;  ///< If relative cost change is below this threshold, stop iterating
   double weightsTol = 1e-4;  ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
@@ -86,16 +85,6 @@ class GncParams {
     maxIterations = maxIter;
   }
 
-  /** Set the maximum weighted residual error for an inlier. For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
-   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
-   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
-   * Assuming a isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq.
-   * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
-   * */
-  void setInlierCostThreshold(const double inth) {
-    barcSq = inth;
-  }
-
   /// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep.
   void setMuStep(const double step) {
     muStep = step;
@@ -131,7 +120,6 @@ class GncParams {
   bool equals(const GncParams& other, double tol = 1e-9) const {
     return baseOptimizerParams.equals(other.baseOptimizerParams)
         && lossType == other.lossType && maxIterations == other.maxIterations
-        && std::fabs(barcSq - other.barcSq) <= tol
         && std::fabs(muStep - other.muStep) <= tol
         && verbosity == other.verbosity && knownInliers == other.knownInliers;
   }
@@ -150,7 +138,6 @@ class GncParams {
         throw std::runtime_error("GncParams::print: unknown loss type.");
     }
     std::cout << "maxIterations: " << maxIterations << "\n";
-    std::cout << "barcSq: " << barcSq << "\n";
     std::cout << "muStep: " << muStep << "\n";
     std::cout << "relativeCostTol: " << relativeCostTol << "\n";
     std::cout << "weightsTol: " << weightsTol << "\n";
@@ -87,6 +87,12 @@ TEST(GncOptimizer, gncConstructor) {
   CHECK(gnc.getFactors().equals(fg));
   CHECK(gnc.getState().equals(initial));
   CHECK(gnc.getParams().equals(gncParams));
+
+  auto gnc2 = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+                                                                gncParams);
+
+  // check that equals() works
+  CHECK(gnc.equals(gnc2));
 }
 
 /* ************************************************************************* */
@@ -340,9 +346,10 @@ TEST(GncOptimizer, calculateWeightsGM) {
   mu = 2.0;
   double barcSq = 5.0;
   weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2);  // outlier, error = 50
-  gncParams.setInlierCostThreshold(barcSq);
+
   auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);
+  gnc2.setInlierCostThresholds(barcSq);
   weights_actual = gnc2.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
 }
@@ -398,9 +405,10 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
     gncParams.setLossType(GncLossType::TLS);
-    gncParams.setInlierCostThreshold(0.51);  // if inlier threshold is slightly larger than 0.5, then measurement is inlier
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                           gncParams);
+    gnc.setInlierCostThresholds(0.51);  // if inlier threshold is slightly larger than 0.5, then measurement is inlier
+
     double mu = 1e6;
     Vector weights_actual = gnc.calculateWeights(initial, mu);
     CHECK(assert_equal(weights_expected, weights_actual, tol));
@@ -414,9 +422,9 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
     gncParams.setLossType(GncLossType::TLS);
-    gncParams.setInlierCostThreshold(0.49);  // if inlier threshold is slightly below 0.5, then measurement is outlier
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                           gncParams);
+    gnc.setInlierCostThresholds(0.49);  // if inlier threshold is slightly below 0.5, then measurement is outlier
     double mu = 1e6;  // very large mu recovers original TLS cost
     Vector weights_actual = gnc.calculateWeights(initial, mu);
     CHECK(assert_equal(weights_expected, weights_actual, tol));
@@ -430,9 +438,9 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
     GaussNewtonParams gnParams;
     GncParams<GaussNewtonParams> gncParams(gnParams);
     gncParams.setLossType(GncLossType::TLS);
-    gncParams.setInlierCostThreshold(0.5);  // boundary case: inlier threshold exactly equal to the residual error
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                           gncParams);
+    gnc.setInlierCostThresholds(0.5);  // boundary case: inlier threshold exactly equal to the residual error
     double mu = 1e6;  // very large mu recovers original TLS cost
     Vector weights_actual = gnc.calculateWeights(initial, mu);
     CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
@@ -576,9 +584,9 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
     gncParams.setKnownInliers(knownInliers);
     gncParams.setLossType(GncLossType::TLS);
     //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
-    gncParams.setInlierCostThreshold(100.0);
     auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                           gncParams);
+    gnc.setInlierCostThresholds(100.0);
 
     Values gnc_result = gnc.optimize();
     CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
@@ -592,6 +600,56 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   }
 }
 
+/* ************************************************************************* */
+TEST(GncOptimizer, setWeights) {
+  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
+
+  Point2 p0(1, 0);
+  Values initial;
+  initial.insert(X(1), p0);
+
+  std::vector<size_t> knownInliers;
+  knownInliers.push_back(0);
+  knownInliers.push_back(1);
+  knownInliers.push_back(2);
+  {
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncLossType::GM);
+    //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+        gncParams);
+    gnc.setInlierCostThresholds(2.0);
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    CHECK(assert_equal(2.0 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
+  }
+  {
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncLossType::GM);
+    //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+        gncParams);
+    gnc.setInlierCostThresholds(2.0 * Vector::Ones(fg.size()));
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    CHECK(assert_equal(2.0 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
+  }
+}
+
 /* ************************************************************************* */
 TEST(GncOptimizer, optimizeSmallPoseGraph) {
   /// load small pose graph