correct formatting

commit 7699f04820
parent c4644a0d61
@@ -35,7 +35,7 @@ using symbol_shorthand::L;
 static double tol = 1e-7;
 
 /* ************************************************************************* */
-template <class BaseOptimizerParameters>
+template<class BaseOptimizerParameters>
 class GncParams {
 public:
   /** Verbosity levels */
@@ -50,11 +50,14 @@ public:
 
   using BaseOptimizer = GaussNewtonOptimizer; // BaseOptimizerParameters::OptimizerType;
 
-  GncParams(const BaseOptimizerParameters& baseOptimizerParams):
-    baseOptimizerParams(baseOptimizerParams) {}
+  GncParams(const BaseOptimizerParameters& baseOptimizerParams) :
+      baseOptimizerParams(baseOptimizerParams) {
+  }
 
   // default constructor
-  GncParams(): baseOptimizerParams() {}
+  GncParams() :
+      baseOptimizerParams() {
+  }
 
   BaseOptimizerParameters baseOptimizerParams;
   /// any other specific GNC parameters:
@@ -62,29 +65,36 @@ public:
   size_t maxIterations = 100; /* maximum number of iterations*/
   double barcSq = 1.0; /* a factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance*/
   double muStep = 1.4; /* multiplicative factor to reduce/increase the mu in gnc */
-  VerbosityGNC verbosityGNC = SILENT;  /* verbosity level */
+  VerbosityGNC verbosityGNC = SILENT; /* verbosity level */
   std::vector<size_t> knownInliers = std::vector<size_t>(); /* slots in the factor graph corresponding to measurements that we know are inliers */
 
-  void setLossType(const RobustLossType type){ lossType = type; }
-  void setMaxIterations(const size_t maxIter){
+  void setLossType(const RobustLossType type) {
+    lossType = type;
+  }
+  void setMaxIterations(const size_t maxIter) {
     std::cout
-    << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
-    << std::endl;
+        << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! "
+        << std::endl;
     maxIterations = maxIter;
   }
-  void setInlierThreshold(const double inth){ barcSq = inth; }
-  void setMuStep(const double step){ muStep = step; }
-  void setVerbosityGNC(const VerbosityGNC verbosity) { verbosityGNC = verbosity; }
+  void setInlierThreshold(const double inth) {
+    barcSq = inth;
+  }
+  void setMuStep(const double step) {
+    muStep = step;
+  }
+  void setVerbosityGNC(const VerbosityGNC verbosity) {
+    verbosityGNC = verbosity;
+  }
   void setKnownInliers(const std::vector<size_t> knownIn) {
-    for(size_t i=0; i<knownIn.size(); i++)
+    for (size_t i = 0; i < knownIn.size(); i++)
       knownInliers.push_back(knownIn[i]);
   }
 
   /// equals
   bool equals(const GncParams& other, double tol = 1e-9) const {
     return baseOptimizerParams.equals(other.baseOptimizerParams)
-        && lossType == other.lossType
-        && maxIterations == other.maxIterations
+        && lossType == other.lossType && maxIterations == other.maxIterations
        && std::fabs(barcSq - other.barcSq) <= tol
        && std::fabs(muStep - other.muStep) <= tol
        && verbosityGNC == other.verbosityGNC
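For reference, a typical configuration of these params in client code might look as follows (a minimal sketch using only the setters and enum values that appear in this diff; the threshold and step values are illustrative, not prescribed by the change):

    LevenbergMarquardtParams lmParams;
    GncParams<LevenbergMarquardtParams> gncParams(lmParams);
    gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
    gncParams.setInlierThreshold(1.0); // barcSq: factor i is an inlier if factor.error() < 1.0
    gncParams.setMuStep(1.4);          // divide mu by 1.4 at each GNC iteration
    gncParams.setVerbosityGNC(GncParams<LevenbergMarquardtParams>::VerbosityGNC::SUMMARY);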
@@ -94,17 +104,18 @@ public:
   /// print function
   void print(const std::string& str) const {
     std::cout << str << "\n";
-    switch(lossType) {
-    case GM: std::cout << "lossType: Geman McClure" << "\n"; break;
+    switch (lossType) {
+    case GM:
+      std::cout << "lossType: Geman McClure" << "\n";
+      break;
     default:
-      throw std::runtime_error(
-          "GncParams::print: unknown loss type.");
+      throw std::runtime_error("GncParams::print: unknown loss type.");
     }
     std::cout << "maxIterations: " << maxIterations << "\n";
     std::cout << "barcSq: " << barcSq << "\n";
     std::cout << "muStep: " << muStep << "\n";
     std::cout << "verbosityGNC: " << verbosityGNC << "\n";
-    for(size_t i=0; i< knownInliers.size(); i++)
+    for (size_t i = 0; i < knownInliers.size(); i++)
       std::cout << "knownInliers: " << knownInliers[i] << "\n";
     baseOptimizerParams.print(str);
   }
@@ -123,21 +134,24 @@ private:
   Vector weights_; // this could be a local variable in optimize, but it is useful to make it accessible from outside
 
 public:
-  GncOptimizer(const NonlinearFactorGraph& graph,
-      const Values& initialValues, const GncParameters& params = GncParameters()) :
-        state_(initialValues), params_(params) {
+  GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
+      const GncParameters& params = GncParameters()) :
+      state_(initialValues), params_(params) {
 
     // make sure all noiseModels are Gaussian or convert to Gaussian
     nfg_.resize(graph.size());
     for (size_t i = 0; i < graph.size(); i++) {
-      if(graph[i]){
-        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<NoiseModelFactor>(graph[i]);
-        noiseModel::Robust::shared_ptr robust = boost::dynamic_pointer_cast<noiseModel::Robust>(factor->noiseModel());
-        if(robust){ // if the factor has a robust loss, we have to change it:
+      if (graph[i]) {
+        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
+            NoiseModelFactor>(graph[i]);
+        noiseModel::Robust::shared_ptr robust = boost::dynamic_pointer_cast<
+            noiseModel::Robust>(factor->noiseModel());
+        if (robust) { // if the factor has a robust loss, we have to change it:
           SharedNoiseModel gaussianNoise = robust->noise();
-          NoiseModelFactor::shared_ptr gaussianFactor = factor->cloneWithNewNoiseModel(gaussianNoise);
+          NoiseModelFactor::shared_ptr gaussianFactor =
+              factor->cloneWithNewNoiseModel(gaussianNoise);
           nfg_[i] = gaussianFactor;
-        } else{ // else we directly push it back
+        } else { // else we directly push it back
           nfg_[i] = factor;
         }
       }
@@ -145,22 +159,30 @@ public:
   }
 
   /// getter functions
-  NonlinearFactorGraph getFactors() const { return NonlinearFactorGraph(nfg_); }
-  Values getState() const { return Values(state_); }
-  GncParameters getParams() const { return GncParameters(params_); }
-  Vector getWeights() const {return weights_;}
+  NonlinearFactorGraph getFactors() const {
+    return NonlinearFactorGraph(nfg_);
+  }
+  Values getState() const {
+    return Values(state_);
+  }
+  GncParameters getParams() const {
+    return GncParameters(params_);
+  }
+  Vector getWeights() const {
+    return weights_;
+  }
 
   /// implement GNC main loop, including graduating nonconvexity with mu
   Values optimize() {
     // start by assuming all measurements are inliers
     weights_ = Vector::Ones(nfg_.size());
-    GaussNewtonOptimizer baseOptimizer(nfg_,state_);
+    GaussNewtonOptimizer baseOptimizer(nfg_, state_);
     Values result = baseOptimizer.optimize();
     double mu = initializeMu();
-    for(size_t iter=0; iter < params_.maxIterations; iter++){
+    for (size_t iter = 0; iter < params_.maxIterations; iter++) {
 
       // display info
-      if (params_.verbosityGNC >= GncParameters::VerbosityGNC::VALUES){
+      if (params_.verbosityGNC >= GncParameters::VerbosityGNC::VALUES) {
         result.print("result\n");
         std::cout << "mu: " << mu << std::endl;
         std::cout << "weights: " << weights_ << std::endl;
@@ -174,9 +196,9 @@ public:
       result = baseOptimizer_iter.optimize();
 
       // stopping condition
-      if( checkMuConvergence(mu) ) {
+      if (checkMuConvergence(mu)) {
         // display info
-        if (params_.verbosityGNC >= GncParameters::VerbosityGNC::SUMMARY){
+        if (params_.verbosityGNC >= GncParameters::VerbosityGNC::SUMMARY) {
           std::cout << "final iterations: " << iter << std::endl;
           std::cout << "final mu: " << mu << std::endl;
           std::cout << "final weights: " << weights_ << std::endl;
@@ -195,14 +217,14 @@ public:
     // compute largest error across all factors
     double rmax_sq = 0.0;
     for (size_t i = 0; i < nfg_.size(); i++) {
-      if(nfg_[i]){
+      if (nfg_[i]) {
         rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_));
       }
     }
     // set initial mu
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM:
-      return  2*rmax_sq / params_.barcSq; // initial mu
+      return 2 * rmax_sq / params_.barcSq; // initial mu
     default:
       throw std::runtime_error(
           "GncOptimizer::initializeMu: called with unknown loss type.");
@@ -211,9 +233,9 @@ public:
 
   /// update the gnc parameter mu to gradually increase nonconvexity
   double updateMu(const double mu) const {
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM:
-      return  std::max(1.0 , mu / params_.muStep); // reduce mu, but saturate at 1
+      return std::max(1.0, mu / params_.muStep); // reduce mu, but saturate at 1
     default:
       throw std::runtime_error(
           "GncOptimizer::updateMu: called with unknown loss type.");
@@ -222,7 +244,7 @@ public:
 
   /// check if we have reached the value of mu for which the surrogate loss matches the original loss
   bool checkMuConvergence(const double mu) const {
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM:
       return std::fabs(mu - 1.0) < 1e-9; // mu=1 recovers the original GM function
     default:
@@ -237,16 +259,20 @@ public:
     NonlinearFactorGraph newGraph;
     newGraph.resize(nfg_.size());
     for (size_t i = 0; i < nfg_.size(); i++) {
-      if(nfg_[i]){
-        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<NoiseModelFactor>(nfg_[i]);
-        noiseModel::Gaussian::shared_ptr noiseModel = boost::dynamic_pointer_cast<noiseModel::Gaussian>(factor->noiseModel());
-        if(noiseModel){
+      if (nfg_[i]) {
+        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
+            NoiseModelFactor>(nfg_[i]);
+        noiseModel::Gaussian::shared_ptr noiseModel =
+            boost::dynamic_pointer_cast<noiseModel::Gaussian>(
+                factor->noiseModel());
+        if (noiseModel) {
           Matrix newInfo = weights[i] * noiseModel->information();
-          SharedNoiseModel newNoiseModel = noiseModel::Gaussian::Information(newInfo);
+          SharedNoiseModel newNoiseModel = noiseModel::Gaussian::Information(
+              newInfo);
           newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
-        }else{
+        } else {
           throw std::runtime_error(
-                    "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
+              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
         }
       }
     }
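The rescaling above replaces each factor's Gaussian information matrix Lambda_i by

    Lambda_i' = w_i * Lambda_i

so a weight near zero effectively removes factor i from the weighted problem, while w_i = 1 leaves it untouched.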
@@ -254,24 +280,27 @@ public:
   }
 
   /// calculate gnc weights
-  Vector calculateWeights(const Values currentEstimate, const double mu){
+  Vector calculateWeights(const Values currentEstimate, const double mu) {
     Vector weights = Vector::Ones(nfg_.size());
 
     // do not update the weights that the user has decided are known inliers
     std::vector<size_t> allWeights;
-    for (size_t k = 0; k < nfg_.size(); k++) {allWeights.push_back(k);}
+    for (size_t k = 0; k < nfg_.size(); k++) {
+      allWeights.push_back(k);
+    }
     std::vector<size_t> unknownWeights;
     std::set_difference(allWeights.begin(), allWeights.end(),
         params_.knownInliers.begin(), params_.knownInliers.end(),
         std::inserter(unknownWeights, unknownWeights.begin()));
 
     // update weights of known inlier/outlier measurements
-    switch(params_.lossType) {
+    switch (params_.lossType) {
     case GncParameters::GM: // use eq (12) in GNC paper
       for (size_t k : unknownWeights) {
-        if(nfg_[k]){
+        if (nfg_[k]) {
           double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
-          weights[k] = std::pow( ( mu*params_.barcSq )/( u2_k + mu*params_.barcSq ) , 2);
+          weights[k] = std::pow(
+              (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
         }
       }
       return weights;
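The weight update implemented here is eq (12) of the GNC paper for the GM surrogate, as the case label notes: with u2_k the squared whitened residual of factor k,

    w_k = ( mu * barcSq / (u2_k + mu * barcSq) )^2

so w_k tends to 1 for residuals well below the inlier threshold and to 0 for gross outliers.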
@@ -284,7 +313,6 @@ public:
 
 /* ************************************************************************* */
 TEST(GncOptimizer, gncParamsConstructor) {
-
   //check params are correctly parsed
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
@@ -296,7 +324,7 @@ TEST(GncOptimizer, gncParamsConstructor) {
 
   // and check params become different if we change lmParams
   lmParams.setVerbosity("DELTA");
-  CHECK(! lmParams.equals(gncParams1.baseOptimizerParams));
+  CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));
 
   // and same for GN
   GaussNewtonParams gnParams;
@@ -310,7 +338,7 @@ TEST(GncOptimizer, gncParamsConstructor) {
   // change something at the gncParams level
   GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
   gncParams2c.setLossType(GncParams<GaussNewtonParams>::RobustLossType::TLS);
-  CHECK(! gncParams2c.equals(gncParams2b.baseOptimizerParams));
+  CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams));
 }
 
 /* ************************************************************************* */
@@ -324,7 +352,8 @@ TEST(GncOptimizer, gncConstructor) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   CHECK(gnc.getFactors().equals(fg));
   CHECK(gnc.getState().equals(initial));
@@ -333,7 +362,6 @@ TEST(GncOptimizer, gncConstructor) {
 
 /* ************************************************************************* */
 TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
-  // simple graph with Gaussian noise model
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
   // same graph with robust noise model
   auto fg_robust = example::sharedRobustFactorGraphWithOutliers();
@@ -344,15 +372,15 @@ TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
+      initial, gncParams);
 
   // make sure that when parsing the graph is transformed into one without robust loss
-  CHECK( fg.equals(gnc.getFactors()) );
+  CHECK(fg.equals(gnc.getFactors()));
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, initializeMu) {
-  // has to have Gaussian noise models !
   auto fg = example::createReallyNonlinearFactorGraph();
 
   Point2 p0(3, 3);
@@ -361,8 +389,10 @@ TEST(GncOptimizer, initializeMu) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
   EXPECT_DOUBLES_EQUAL(gnc.initializeMu(), 2 * 198.999, 1e-3); // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq (barcSq=1 in this example)
 }
 
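Worked check of the expected value: with barcSq = 1 (the default) and the largest squared whitened residual of this graph at p0 = (3, 3) being approximately 198.999, the formula from initializeMu gives mu_0 = 2 * 198.999 / 1 = 397.998, which the 1e-3 tolerance confirms.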
@@ -377,8 +407,10 @@ TEST(GncOptimizer, updateMu) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   double mu = 5.0;
   EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);
@@ -399,8 +431,10 @@ TEST(GncOptimizer, checkMuConvergence) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  gncParams.setLossType(GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  gncParams.setLossType(
+      GncParams<LevenbergMarquardtParams>::RobustLossType::GM);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   double mu = 1.0;
   CHECK(gnc.checkMuConvergence(mu));
@@ -408,67 +442,69 @@ TEST(GncOptimizer, checkMuConvergence) {
 
 /* ************************************************************************* */
 TEST(GncOptimizer, calculateWeights) {
-  // has to have Gaussian noise models !
-    auto fg = example::sharedNonRobustFactorGraphWithOutliers();
+  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
 
-    Point2 p0(0, 0);
-    Values initial;
-    initial.insert(X(1), p0);
+  Point2 p0(0, 0);
+  Values initial;
+  initial.insert(X(1), p0);
 
-    // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 * 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
-    Vector weights_expected = Vector::Zero(4);
-    weights_expected[0] = 1.0; // zero error
-    weights_expected[1] = 1.0; // zero error
-    weights_expected[2] = 1.0; // zero error
-    weights_expected[3] = std::pow(1.0 / (50.0 + 1.0),2); // outlier, error = 50
+  // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 * 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier)
+  Vector weights_expected = Vector::Zero(4);
+  weights_expected[0] = 1.0; // zero error
+  weights_expected[1] = 1.0; // zero error
+  weights_expected[2] = 1.0; // zero error
+  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50
 
-    GaussNewtonParams gnParams;
-    GncParams<GaussNewtonParams> gncParams(gnParams);
-    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
-    double mu = 1.0;
-    Vector weights_actual = gnc.calculateWeights(initial,mu);
-    CHECK(assert_equal(weights_expected, weights_actual, tol));
+  GaussNewtonParams gnParams;
+  GncParams<GaussNewtonParams> gncParams(gnParams);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  double mu = 1.0;
+  Vector weights_actual = gnc.calculateWeights(initial, mu);
+  CHECK(assert_equal(weights_expected, weights_actual, tol));
 
-    mu = 2.0;
-    double barcSq = 5.0;
-    weights_expected[3] = std::pow(mu*barcSq / (50.0 + mu*barcSq),2); // outlier, error = 50
-    gncParams.setInlierThreshold(barcSq);
-    auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
-    weights_actual = gnc2.calculateWeights(initial,mu);
-    CHECK(assert_equal(weights_expected, weights_actual, tol));
+  mu = 2.0;
+  double barcSq = 5.0;
+  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
+  gncParams.setInlierThreshold(barcSq);
+  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+      gncParams);
+  weights_actual = gnc2.calculateWeights(initial, mu);
+  CHECK(assert_equal(weights_expected, weights_actual, tol));
 }
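Worked check of the expected outlier weight: the outlier factor has squared whitened residual u2 = 50, so eq (12) gives w = (1 * 1 / (50 + 1 * 1))^2 = (1/51)^2 for mu = 1, barcSq = 1 (the defaults), and w = (2 * 5 / (50 + 2 * 5))^2 = (10/60)^2 after setting mu = 2 and barcSq = 5, exactly the two values assigned to weights_expected[3] above.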
 
 /* ************************************************************************* */
 TEST(GncOptimizer, makeWeightedGraph) {
   // create original factor
-    double sigma1 = 0.1;
-    NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(sigma1);
+  double sigma1 = 0.1;
+  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
+      sigma1);
 
-    // create expected
-    double sigma2 = 10;
-    NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(sigma2);
+  // create expected
+  double sigma2 = 10;
+  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
+      sigma2);
 
-    // create weights
-    Vector weights = Vector::Ones(1); // original info:1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10-4
-    weights[0] = 1e-4;
+  // create weights
+  Vector weights = Vector::Ones(1); // original info:1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 10-4
+  weights[0] = 1e-4;
 
-    // create actual
-    Point2 p0(3, 3);
-    Values initial;
-    initial.insert(X(1), p0);
+  // create actual
+  Point2 p0(3, 3);
+  Values initial;
+  initial.insert(X(1), p0);
 
-    LevenbergMarquardtParams lmParams;
-    GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial, gncParams);
-    NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);
+  LevenbergMarquardtParams lmParams;
+  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
+      gncParams);
+  NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);
 
-    // check it's all good
-    CHECK(assert_equal(expected, actual));
+  // check it's all good
+  CHECK(assert_equal(expected, actual));
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, optimizeSimple) {
-  // has to have Gaussian noise models !
   auto fg = example::createReallyNonlinearFactorGraph();
 
   Point2 p0(3, 3);
@@ -477,7 +513,8 @@ TEST(GncOptimizer, optimizeSimple) {
 
   LevenbergMarquardtParams lmParams;
   GncParams<LevenbergMarquardtParams> gncParams(lmParams);
-  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, gncParams);
+  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
 
   Values actual = gnc.optimize();
   DOUBLES_EQUAL(0, fg.error(actual), tol);
@@ -485,7 +522,6 @@ TEST(GncOptimizer, optimizeSimple) {
 
 /* ************************************************************************* */
 TEST(GncOptimizer, optimize) {
-  // has to have Gaussian noise models !
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
 
   Point2 p0(1, 0);
@@ -497,26 +533,25 @@ TEST(GncOptimizer, optimize) {
   GaussNewtonOptimizer gn(fg, initial, gnParams);
   Values gn_results = gn.optimize();
   // converges to incorrect point due to lack of robustness to an outlier, ideal solution is Point2(0,0)
-  CHECK(assert_equal(Point2(0.25,0.0), gn_results.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));
 
   // try with robust loss function and standard GN
   auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with factors wrapped in Geman McClure losses
   GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
   Values gn2_results = gn2.optimize();
   // converges to incorrect point, this time due to the nonconvexity of the loss
-  CHECK(assert_equal(Point2(0.999706,0.0), gn2_results.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));
 
   // .. but graduated nonconvexity ensures both robustness and convergence in the face of nonconvexity
   GncParams<GaussNewtonParams> gncParams(gnParams);
   // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::VerbosityGNC::SUMMARY);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
   Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.0,0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 }
 
 /* ************************************************************************* */
 TEST(GncOptimizer, optimizeWithKnownInliers) {
-  // has to have Gaussian noise models !
   auto fg = example::sharedNonRobustFactorGraphWithOutliers();
 
   Point2 p0(1, 0);
@@ -535,7 +570,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
 
   Values gnc_result = gnc.optimize();
-  CHECK(assert_equal(Point2(0.0,0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
 
   // check weights were actually fixed:
   Vector finalWeights = gnc.getWeights();