Merge pull request #684 from borglab/feature/gncImprovements
changed barcsq to a vector to allow each factor to have a different inlier threshold
commit 8261326e78
@@ -28,8 +28,17 @@
 #include <gtsam/nonlinear/GncParams.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <boost/math/distributions/chi_squared.hpp>

 namespace gtsam {

+/*
+ * Quantile of chi-squared distribution with given degrees of freedom at probability alpha.
+ * Equivalent to chi2inv in Matlab.
+ */
+static double Chi2inv(const double alpha, const size_t dofs) {
+  boost::math::chi_squared_distribution<double> chi2(dofs);
+  return boost::math::quantile(chi2, alpha);
+}
+
 /* ************************************************************************* */
 template<class GncParameters>
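Note (not part of the diff): a minimal standalone sketch of what the new Chi2inv helper is used for, namely converting a desired inlier probability alpha into a threshold on factor.error(), which in gtsam is 0.5 times the squared whitened residual. For alpha = 0.99 this reproduces the per-dimension defaults checked in the new barcsq tests further down (about 3.317 for 1 DOF, 4.605 for 2 DOF, 5.672 for 3 DOF).

// Standalone illustration (not from this PR's code): probability -> inlier threshold.
#include <boost/math/distributions/chi_squared.hpp>
#include <iostream>

int main() {
  const double alpha = 0.99;  // desired probability that an inlier passes the test
  for (size_t dof = 1; dof <= 3; ++dof) {
    boost::math::chi_squared_distribution<double> chi2(dof);
    // factor.error() is 0.5 * squared whitened residual, hence the 0.5 below
    const double barcSq = 0.5 * boost::math::quantile(chi2, alpha);
    std::cout << "dof = " << dof << "  barcSq = " << barcSq << "\n";
  }
  return 0;
}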
@@ -43,6 +52,7 @@ class GncOptimizer {
   Values state_; ///< Initial values to be used at each iteration by GNC.
   GncParameters params_; ///< GNC parameters.
   Vector weights_; ///< Weights associated to each factor in GNC (this could be a local variable in optimize, but it is useful to make it accessible from outside).
+  Vector barcSq_; ///< Inlier thresholds. A factor is considered an inlier if factor.error() < barcSq_[i] (where i is the position of the factor in the factor graph). Note that factor.error() whitens by the covariance.

 public:
   /// Constructor.
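Note (not part of the diff): a small sketch of the quantity barcSq_[i] is compared against. Header locations are assumed from the usual gtsam layout; with an isotropic sigma = 0.5 noise model and a residual of norm 1, factor.error() returns 0.5 * (1/0.5)^2 = 2, so that factor counts as an inlier only if barcSq_[i] > 2.

// Illustration (not from this PR): factor.error() is the half-squared whitened residual.
#include <gtsam/geometry/Point2.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/PriorFactor.h>
#include <iostream>

using namespace gtsam;

int main() {
  auto model = noiseModel::Isotropic::Sigma(2, 0.5);
  PriorFactor<Point2> factor(1, Point2(0.0, 0.0), model);

  Values values;
  values.insert(1, Point2(1.0, 0.0));  // residual of norm 1

  std::cout << factor.error(values) << "\n";  // 0.5 * ||(1,0)/0.5||^2 = 2
  return 0;
}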
@@ -63,6 +73,40 @@ class GncOptimizer {
         nfg_[i] = robust ? factor->cloneWithNewNoiseModel(robust->noise()) : factor;
       }
     }
+
+    // set default barcSq_ (inlier threshold)
+    double alpha = 0.99; // with this (default) probability, inlier residuals are smaller than barcSq_
+    setInlierCostThresholdsAtProbability(alpha);
   }

+  /** Set the maximum weighted residual error for an inlier (same for all factors). For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
+   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
+   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
+   * Assuming an isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq.
+   * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
+   * */
+  void setInlierCostThresholds(const double inth) {
+    barcSq_ = inth * Vector::Ones(nfg_.size());
+  }
+
+  /** Set the maximum weighted residual error for an inlier (one for each factor). For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
+   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
+   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
+   * */
+  void setInlierCostThresholds(const Vector& inthVec) {
+    barcSq_ = inthVec;
+  }
+
+  /** Set the maximum weighted residual error threshold by specifying the probability
+   * alpha that the inlier residuals are smaller than that threshold
+   * */
+  void setInlierCostThresholdsAtProbability(const double alpha) {
+    barcSq_ = Vector::Ones(nfg_.size()); // initialize
+    for (size_t k = 0; k < nfg_.size(); k++) {
+      if (nfg_[k]) {
+        barcSq_[k] = 0.5 * Chi2inv(alpha, nfg_[k]->dim()); // 0.5 derives from the error definition in gtsam
+      }
+    }
+  }
+
   /// Access a copy of the internal factor graph.
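Note (not part of the diff): a usage sketch of the three new setters, assuming a NonlinearFactorGraph fg and Values initial like the ones built in the unit tests below.

// Usage sketch (not from this PR): three ways to set the inlier thresholds.
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/GncOptimizer.h>

using namespace gtsam;

Values robustSolve(const NonlinearFactorGraph& fg, const Values& initial) {
  GncParams<GaussNewtonParams> gncParams;
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);

  // (a) one threshold shared by all factors (on factor.error(), i.e. 0.5 * whitened squared residual)
  gnc.setInlierCostThresholds(1.0);

  // (b) a different threshold per factor, one entry per factor in fg
  gnc.setInlierCostThresholds(0.5 * Vector::Ones(fg.size()));

  // (c) thresholds from a probability: barcSq_[k] = 0.5 * Chi2inv(alpha, dim of factor k)
  gnc.setInlierCostThresholdsAtProbability(0.99);

  return gnc.optimize();
}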
@@ -77,6 +121,17 @@ class GncOptimizer {
   /// Access a copy of the GNC weights.
   const Vector& getWeights() const { return weights_;}

+  /// Get the inlier threshold.
+  const Vector& getInlierCostThresholds() const {return barcSq_;}
+
+  /// Equals.
+  bool equals(const GncOptimizer& other, double tol = 1e-9) const {
+    return nfg_.equals(other.getFactors())
+        && equal(weights_, other.getWeights())
+        && params_.equals(other.getParams())
+        && equal(barcSq_, other.getInlierCostThresholds());
+  }
+
   /// Compute optimal solution using graduated non-convexity.
   Values optimize() {
     // start by assuming all measurements are inliers
@@ -153,28 +208,38 @@ class GncOptimizer {

   /// Initialize the gnc parameter mu such that loss is approximately convex (remark 5 in GNC paper).
   double initializeMu() const {
-    // compute largest error across all factors
-    double rmax_sq = 0.0;
-    for (size_t i = 0; i < nfg_.size(); i++) {
-      if (nfg_[i]) {
-        rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_));
-      }
-    }
-    // set initial mu
+
+    double mu_init = 0.0;
+    // initialize mu to the value specified in Remark 5 in GNC paper.
     switch (params_.lossType) {
       case GncLossType::GM:
-        // surrogate cost is convex for large mu
-        return 2 * rmax_sq / params_.barcSq; // initial mu
+        /* surrogate cost is convex for large mu. initialize as in remark 5 in GNC paper.
+         Since barcSq_ can be different for each factor, we compute the max of the quantity in remark 5 in GNC paper
+         */
+        for (size_t k = 0; k < nfg_.size(); k++) {
+          if (nfg_[k]) {
+            mu_init = std::max(mu_init, 2 * nfg_[k]->error(state_) / barcSq_[k]);
+          }
+        }
+        return mu_init; // initial mu
       case GncLossType::TLS:
-        /* initialize mu to the value specified in Remark 5 in GNC paper.
-         surrogate cost is convex for mu close to zero
+        /* surrogate cost is convex for mu close to zero. initialize as in remark 5 in GNC paper.
          degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop)
-         according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq/ excessResidual
-         however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop
+         however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop.
+         Since barcSq_ can be different for each factor, we look for the minimum (positive) quantity in remark 5 in GNC paper
          */
-        return
-            (2 * rmax_sq - params_.barcSq) > 0 ?
-                params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1;
+        mu_init = std::numeric_limits<double>::infinity();
+        for (size_t k = 0; k < nfg_.size(); k++) {
+          if (nfg_[k]) {
+            double rk = nfg_[k]->error(state_);
+            mu_init = (2 * rk - barcSq_[k]) > 0 ? // if positive, update mu, otherwise keep same
+                std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k]) ) : mu_init;
+          }
+        }
+        return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1; // if mu <= 0 or mu = inf, return -1,
+        // which leads to termination of the main gnc loop. In this case, all residuals are already below the threshold
+        // and there is no need to robustify (TLS = least squares)
       default:
         throw std::runtime_error(
            "GncOptimizer::initializeMu: called with unknown loss type.");
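Note (not part of the diff): the Remark 5 initialization with the numbers used in the initializeMu unit tests below, i.e. a worst factor with error 198.999 and barcSq = 1.

// Worked example (not from this PR): Remark 5 initial mu for GM and TLS.
#include <iostream>

int main() {
  const double r2 = 198.999;  // largest factor.error() at the initial estimate
  const double barcSq = 1.0;  // inlier threshold for that factor

  const double mu_gm = 2.0 * r2 / barcSq;              // GM: 397.998 (max over factors)
  const double mu_tls = barcSq / (2.0 * r2 - barcSq);  // TLS: ~0.002525 (min positive over factors)

  std::cout << "mu_gm = " << mu_gm << ", mu_tls = " << mu_tls << "\n";
  return 0;
}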
@@ -305,14 +370,14 @@ class GncOptimizer {
         if (nfg_[k]) {
           double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
           weights[k] = std::pow(
-              (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
+              (mu * barcSq_[k]) / (u2_k + mu * barcSq_[k]), 2);
         }
       }
       return weights;
     }
     case GncLossType::TLS: { // use eq (14) in GNC paper
-      double upperbound = (mu + 1) / mu * params_.barcSq;
-      double lowerbound = mu / (mu + 1) * params_.barcSq;
+      double upperbound = (mu + 1) / mu * barcSq_.maxCoeff();
+      double lowerbound = mu / (mu + 1) * barcSq_.minCoeff();
       for (size_t k : unknownWeights) {
         if (nfg_[k]) {
           double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
@@ -321,7 +386,7 @@ class GncOptimizer {
           } else if (u2_k <= lowerbound) {
             weights[k] = 1;
           } else {
-            weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k)
+            weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k)
                 - mu;
           }
         }
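Note (not part of the diff): eq. (12) (GM) and eq. (14) (TLS) from the GNC paper with the numbers used in the calculateWeightsGM test below, i.e. mu = 2, barcSq = 5, and an outlier with error 50.

// Worked example (not from this PR): per-factor GNC weights.
#include <cmath>
#include <iostream>

int main() {
  const double mu = 2.0, barcSq = 5.0, u2 = 50.0;  // u2 = factor.error(), whitened

  // GM, eq. (12): (mu*barcSq / (u2 + mu*barcSq))^2 = (10/60)^2 ~= 0.028
  const double w_gm = std::pow(mu * barcSq / (u2 + mu * barcSq), 2);

  // TLS, eq. (14): 0 above the upper bound, 1 below the lower bound, in between otherwise
  const double upper = (mu + 1) / mu * barcSq;  // 7.5
  const double lower = mu / (mu + 1) * barcSq;  // ~3.33
  double w_tls;
  if (u2 >= upper) w_tls = 0.0;
  else if (u2 <= lower) w_tls = 1.0;
  else w_tls = std::sqrt(barcSq * mu * (mu + 1) / u2) - mu;

  std::cout << "w_gm = " << w_gm << ", w_tls = " << w_tls << "\n";  // w_tls = 0 since 50 >= 7.5
  return 0;
}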
@@ -66,7 +66,6 @@ class GncParams {
   /// any other specific GNC parameters:
   GncLossType lossType = TLS; ///< Default loss
   size_t maxIterations = 100; ///< Maximum number of iterations
-  double barcSq = 1.0; ///< A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance
   double muStep = 1.4; ///< Multiplicative factor to reduce/increase the mu in gnc
   double relativeCostTol = 1e-5; ///< If relative cost change is below this threshold, stop iterating
   double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
@@ -86,16 +85,6 @@ class GncParams {
     maxIterations = maxIter;
   }

-  /** Set the maximum weighted residual error for an inlier. For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
-   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
-   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
-   * Assuming a isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq.
-   * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
-   * */
-  void setInlierCostThreshold(const double inth) {
-    barcSq = inth;
-  }
-
   /// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep.
   void setMuStep(const double step) {
     muStep = step;
@@ -131,7 +120,6 @@ class GncParams {
   bool equals(const GncParams& other, double tol = 1e-9) const {
     return baseOptimizerParams.equals(other.baseOptimizerParams)
         && lossType == other.lossType && maxIterations == other.maxIterations
-        && std::fabs(barcSq - other.barcSq) <= tol
         && std::fabs(muStep - other.muStep) <= tol
         && verbosity == other.verbosity && knownInliers == other.knownInliers;
   }
@@ -150,7 +138,6 @@ class GncParams {
         throw std::runtime_error("GncParams::print: unknown loss type.");
     }
     std::cout << "maxIterations: " << maxIterations << "\n";
-    std::cout << "barcSq: " << barcSq << "\n";
     std::cout << "muStep: " << muStep << "\n";
     std::cout << "relativeCostTol: " << relativeCostTol << "\n";
     std::cout << "weightsTol: " << weightsTol << "\n";
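Note (not part of the diff): the net effect on user code is that the inlier threshold is no longer a GncParams field but is set on the optimizer (per factor if desired), as the updated tests below show. A migration sketch:

// Migration sketch (not from this PR).
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/GncOptimizer.h>

using namespace gtsam;

Values solveTLS(const NonlinearFactorGraph& fg, const Values& initial) {
  GncParams<GaussNewtonParams> gncParams;
  gncParams.setLossType(GncLossType::TLS);  // loss, maxIterations, muStep, ... stay in GncParams
  // gncParams.setInlierCostThreshold(0.51);   // before this PR: the threshold lived in GncParams

  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  gnc.setInlierCostThresholds(0.51);           // after this PR: set it on the optimizer instead
  return gnc.optimize();
}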
@@ -33,6 +33,9 @@
 #include <gtsam/slam/dataset.h>
 #include <tests/smallExample.h>

+#include <gtsam/sam/BearingFactor.h>
+#include <gtsam/geometry/Pose2.h>
+
 using namespace std;
 using namespace gtsam;
@@ -87,6 +90,12 @@ TEST(GncOptimizer, gncConstructor) {
   CHECK(gnc.getFactors().equals(fg));
   CHECK(gnc.getState().equals(initial));
   CHECK(gnc.getParams().equals(gncParams));
+
+  auto gnc2 = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
+      gncParams);
+
+  // check the equal works
+  CHECK(gnc.equals(gnc2));
 }

 /* ************************************************************************* */
@@ -122,6 +131,7 @@ TEST(GncOptimizer, initializeMu) {
   gncParams.setLossType(GncLossType::GM);
   auto gnc_gm = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
       gncParams);
+  gnc_gm.setInlierCostThresholds(1.0);
   // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq
   // (barcSq=1 in this example)
   EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);
@@ -130,6 +140,7 @@
   gncParams.setLossType(GncLossType::TLS);
   auto gnc_tls = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
       gncParams);
+  gnc_tls.setInlierCostThresholds(1.0);
   // according to rmk 5 in the gnc paper: m0 = barcSq / (2 * rmax^2 - barcSq)
   // (barcSq=1 in this example)
   EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
@@ -333,6 +344,7 @@ TEST(GncOptimizer, calculateWeightsGM) {
   GncParams<GaussNewtonParams> gncParams(gnParams);
   gncParams.setLossType(GncLossType::GM);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
+  gnc.setInlierCostThresholds(1.0);
   double mu = 1.0;
   Vector weights_actual = gnc.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
@@ -340,9 +352,10 @@
   mu = 2.0;
   double barcSq = 5.0;
   weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50
-  gncParams.setInlierCostThreshold(barcSq);
+
   auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
       gncParams);
+  gnc2.setInlierCostThresholds(barcSq);
   weights_actual = gnc2.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
 }
@@ -398,9 +411,10 @@ TEST(GncOptimizer, calculateWeightsTLS2) {
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
   gncParams.setLossType(GncLossType::TLS);
-  gncParams.setInlierCostThreshold(0.51); // if inlier threshold is slightly larger than 0.5, then measurement is inlier
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
       gncParams);
+  gnc.setInlierCostThresholds(0.51); // if inlier threshold is slightly larger than 0.5, then measurement is inlier
+
   double mu = 1e6;
   Vector weights_actual = gnc.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
@@ -414,9 +428,9 @@
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
   gncParams.setLossType(GncLossType::TLS);
-  gncParams.setInlierCostThreshold(0.49); // if inlier threshold is slightly below 0.5, then measurement is outlier
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
       gncParams);
+  gnc.setInlierCostThresholds(0.49); // if inlier threshold is slightly below 0.5, then measurement is outlier
   double mu = 1e6; // very large mu recovers original TLS cost
   Vector weights_actual = gnc.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, tol));
@@ -430,9 +444,9 @@
   GaussNewtonParams gnParams;
   GncParams<GaussNewtonParams> gncParams(gnParams);
   gncParams.setLossType(GncLossType::TLS);
-  gncParams.setInlierCostThreshold(0.5); // if inlier threshold is slightly below 0.5, then measurement is outlier
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
       gncParams);
+  gnc.setInlierCostThresholds(0.5); // if inlier threshold is slightly below 0.5, then measurement is outlier
   double mu = 1e6; // very large mu recovers original TLS cost
   Vector weights_actual = gnc.calculateWeights(initial, mu);
   CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
@@ -542,7 +556,7 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
       gncParams);
-
+  gnc.setInlierCostThresholds(1.0);

   Values gnc_result = gnc.optimize();
   CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
@@ -576,9 +590,9 @@
   gncParams.setKnownInliers(knownInliers);
   gncParams.setLossType(GncLossType::TLS);
   //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
-  gncParams.setInlierCostThreshold(100.0);
   auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
       gncParams);
+  gnc.setInlierCostThresholds(100.0);

   Values gnc_result = gnc.optimize();
   CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
@@ -592,6 +606,109 @@ TEST(GncOptimizer, optimizeWithKnownInliers) {
   }
 }

+/* ************************************************************************* */
+TEST(GncOptimizer, chi2inv) {
+  DOUBLES_EQUAL(8.807468393511950, Chi2inv(0.997, 1), tol); // from MATLAB: chi2inv(0.997, 1) = 8.807468393511950
+  DOUBLES_EQUAL(13.931422665512077, Chi2inv(0.997, 3), tol); // from MATLAB: chi2inv(0.997, 3) = 13.931422665512077
+}
+
+/* ************************************************************************* */
+TEST(GncOptimizer, barcsq) {
+  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
+
+  Point2 p0(1, 0);
+  Values initial;
+  initial.insert(X(1), p0);
+
+  std::vector<size_t> knownInliers;
+  knownInliers.push_back(0);
+  knownInliers.push_back(1);
+  knownInliers.push_back(2);
+
+  GncParams<GaussNewtonParams> gncParams;
+  gncParams.setKnownInliers(knownInliers);
+  gncParams.setLossType(GncLossType::GM);
+  //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+      gncParams);
+  // expected: chi2inv(0.99, 2)/2
+  CHECK(assert_equal(4.605170185988091 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
+}
+
+/* ************************************************************************* */
+TEST(GncOptimizer, barcsq_heterogeneousFactors) {
+  NonlinearFactorGraph fg;
+  // specify noise model, otherwise it segfaults if we leave the default noise model
+  SharedNoiseModel model3D(noiseModel::Isotropic::Sigma(3, 0.5));
+  fg.add( PriorFactor<Pose2>( 0, Pose2(0.0, 0.0, 0.0) , model3D )); // size 3
+  SharedNoiseModel model2D(noiseModel::Isotropic::Sigma(2, 0.5));
+  fg.add( PriorFactor<Point2>( 1, Point2(0.0,0.0), model2D )); // size 2
+  SharedNoiseModel model1D(noiseModel::Isotropic::Sigma(1, 0.5));
+  fg.add( BearingFactor<Pose2, Point2>( 0, 1, 1.0, model1D) ); // size 1
+
+  Values initial;
+  initial.insert(0, Pose2(0.0, 0.0, 0.0));
+  initial.insert(1, Point2(0.0,0.0));
+
+  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial);
+  CHECK(assert_equal(Vector3(5.672433365072185, 4.605170185988091, 3.317448300510607),
+      gnc.getInlierCostThresholds(), 1e-3));
+
+  // extra test:
+  // fg.add( PriorFactor<Pose2>( 0, Pose2(0.0, 0.0, 0.0) )); // works if we add model3D as noise model
+  // std::cout << "fg[3]->dim() " << fg[3]->dim() << std::endl; // this segfaults?
+}
+
+/* ************************************************************************* */
+TEST(GncOptimizer, setWeights) {
+  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
+
+  Point2 p0(1, 0);
+  Values initial;
+  initial.insert(X(1), p0);
+
+  std::vector<size_t> knownInliers;
+  knownInliers.push_back(0);
+  knownInliers.push_back(1);
+  knownInliers.push_back(2);
+  {
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncLossType::GM);
+    //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+        gncParams);
+    gnc.setInlierCostThresholds(2.0);
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    CHECK(assert_equal(2.0 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
+  }
+  {
+    GncParams<GaussNewtonParams> gncParams;
+    gncParams.setKnownInliers(knownInliers);
+    gncParams.setLossType(GncLossType::GM);
+    //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
+    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
+        gncParams);
+    gnc.setInlierCostThresholds(2.0 * Vector::Ones(fg.size()));
+    Values gnc_result = gnc.optimize();
+    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
+
+    // check weights were actually fixed:
+    Vector finalWeights = gnc.getWeights();
+    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
+    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
+    CHECK(assert_equal(2.0 * Vector::Ones(fg.size()), gnc.getInlierCostThresholds(), 1e-3));
+  }
+}
+
 /* ************************************************************************* */
 TEST(GncOptimizer, optimizeSmallPoseGraph) {
   /// load small pose graph