commit 90e22cf0dd
@@ -28,6 +28,8 @@ class GaussNewtonOptimizer;
 * NonlinearOptimizationParams.
 */
class GTSAM_EXPORT GaussNewtonParams : public NonlinearOptimizerParams {
 public:
  using OptimizerType = GaussNewtonOptimizer;
};

/**
@@ -0,0 +1,338 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file GncOptimizer.h
 * @brief The GncOptimizer class
 * @author Jingnan Shi
 * @author Luca Carlone
 * @author Frank Dellaert
 *
 * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception:
 * From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf)
 *
 * See also:
 * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications",
 * arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020.
 */

#pragma once

#include <gtsam/nonlinear/GncParams.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

namespace gtsam {

/* ************************************************************************* */
template<class GncParameters>
class GncOptimizer {
 public:
  /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
  typedef typename GncParameters::OptimizerType BaseOptimizer;

 private:
  NonlinearFactorGraph nfg_;  ///< Original factor graph to be solved by GNC.
  Values state_;  ///< Initial values to be used at each iteration by GNC.
  GncParameters params_;  ///< GNC parameters.
  Vector weights_;  ///< Weights associated to each factor in GNC (this could be a local variable in optimize, but it is useful to make it accessible from outside).

 public:
  /// Constructor.
  GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
               const GncParameters& params = GncParameters())
      : state_(initialValues),
        params_(params) {
    // Make sure all noise models are Gaussian, or convert them to Gaussian.
    nfg_.resize(graph.size());
    for (size_t i = 0; i < graph.size(); i++) {
      if (graph[i]) {
        NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast<
            NoiseModelFactor>(graph[i]);
        auto robust = boost::dynamic_pointer_cast<
            noiseModel::Robust>(factor->noiseModel());
        // If the factor has a robust loss, we remove the robust wrapper and
        // keep the underlying Gaussian noise model.
        nfg_[i] = robust ? factor->cloneWithNewNoiseModel(robust->noise()) : factor;
      }
    }
  }

  /// Access the internal factor graph.
  const NonlinearFactorGraph& getFactors() const { return nfg_; }

  /// Access the internal values.
  const Values& getState() const { return state_; }

  /// Access the parameters.
  const GncParameters& getParams() const { return params_; }

  /// Access the GNC weights.
  const Vector& getWeights() const { return weights_; }

  /// Compute the optimal solution using graduated non-convexity.
  Values optimize() {
    // start by assuming all measurements are inliers
    weights_ = Vector::Ones(nfg_.size());
    BaseOptimizer baseOptimizer(nfg_, state_);
    Values result = baseOptimizer.optimize();
    double mu = initializeMu();
    double prev_cost = nfg_.error(result);
    double cost = 0.0;  // this will be updated in the main loop

    // Handle the degenerate case of small maximum residual errors at
    // initialization. For GM: if the residual error is small, mu -> 0.
    // For TLS: if the residual error is small, mu -> -1.
    if (mu <= 0) {
      if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
        std::cout << "GNC Optimizer stopped because maximum residual at "
                     "initialization is small."
                  << std::endl;
      }
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        result.print("result\n");
        std::cout << "mu: " << mu << std::endl;
      }
      return result;
    }

    size_t iter;
    for (iter = 0; iter < params_.maxIterations; iter++) {
      // display info
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        std::cout << "iter: " << iter << std::endl;
        result.print("result\n");
        std::cout << "mu: " << mu << std::endl;
        std::cout << "weights: " << weights_ << std::endl;
      }
      // weights update
      weights_ = calculateWeights(result, mu);

      // variable/values update
      NonlinearFactorGraph graph_iter = this->makeWeightedGraph(weights_);
      BaseOptimizer baseOptimizer_iter(graph_iter, state_);
      result = baseOptimizer_iter.optimize();

      // stopping condition
      cost = graph_iter.error(result);
      if (checkConvergence(mu, weights_, cost, prev_cost)) {
        break;
      }

      // update mu
      mu = updateMu(mu);

      // get ready for the next iteration
      prev_cost = cost;

      // display info
      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
        std::cout << "previous cost: " << prev_cost << std::endl;
        std::cout << "current cost: " << cost << std::endl;
      }
    }
    // display info
    if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
      std::cout << "final iterations: " << iter << std::endl;
      std::cout << "final mu: " << mu << std::endl;
      std::cout << "final weights: " << weights_ << std::endl;
      std::cout << "previous cost: " << prev_cost << std::endl;
      std::cout << "current cost: " << cost << std::endl;
    }
    return result;
  }

  /// Initialize the GNC parameter mu such that the loss is approximately convex (Remark 5 in the GNC paper).
  double initializeMu() const {
    // compute the largest residual error across all factors
    double rmax_sq = 0.0;
    for (size_t i = 0; i < nfg_.size(); i++) {
      if (nfg_[i]) {
        rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_));
      }
    }
    // set the initial mu
    switch (params_.lossType) {
      case GncLossType::GM:
        // surrogate cost is convex for large mu
        return 2 * rmax_sq / params_.barcSq;  // initial mu
      case GncLossType::TLS:
        /* Initialize mu to the value specified in Remark 5 of the GNC paper.
         The surrogate cost is convex for mu close to zero.
         Degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop).
         According to the remark, mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq / excessResidual;
         however, if the denominator is 0 or negative, we return mu = -1, which leads to termination of the main GNC loop.
         */
        return
            (2 * rmax_sq - params_.barcSq) > 0 ?
                params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1;
      default:
        throw std::runtime_error(
            "GncOptimizer::initializeMu: called with unknown loss type.");
    }
  }
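
  // A worked instance of the two initializations above, using the values from
  // the unit tests in this commit (rmax_sq ~= 198.999, barcSq = 1):
  //   GM:  mu0 = 2 * 198.999 / 1         ~= 398     (large mu: convex surrogate)
  //   TLS: mu0 = 1 / (2 * 198.999 - 1)   ~= 2.5e-3  (small mu: convex surrogate)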

  /// Update the GNC parameter mu to gradually increase nonconvexity.
  double updateMu(const double mu) const {
    switch (params_.lossType) {
      case GncLossType::GM:
        // reduce mu, but saturate at 1 (the original cost is recovered for mu -> 1)
        return std::max(1.0, mu / params_.muStep);
      case GncLossType::TLS:
        // increase mu at each iteration (the original cost is recovered for mu -> inf)
        return mu * params_.muStep;
      default:
        throw std::runtime_error(
            "GncOptimizer::updateMu: called with unknown loss type.");
    }
  }

  /// Check if we have reached the value of mu for which the surrogate loss matches the original loss.
  bool checkMuConvergence(const double mu) const {
    bool muConverged = false;
    switch (params_.lossType) {
      case GncLossType::GM:
        muConverged = std::fabs(mu - 1.0) < 1e-9;  // mu = 1 recovers the original GM function
        break;
      case GncLossType::TLS:
        muConverged = false;  // for TLS there is no stopping condition on mu (it must tend to infinity)
        break;
      default:
        throw std::runtime_error(
            "GncOptimizer::checkMuConvergence: called with unknown loss type.");
    }
    if (muConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "muConverged = true " << std::endl;
    return muConverged;
  }

  /// Check convergence of relative cost differences.
  bool checkCostConvergence(const double cost, const double prev_cost) const {
    bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
        < params_.relativeCostTol;
    if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "checkCostConvergence = true " << std::endl;
    return costConverged;
  }

  /// Check convergence of the weights to binary values.
  bool checkWeightsConvergence(const Vector& weights) const {
    bool weightsConverged = false;
    switch (params_.lossType) {
      case GncLossType::GM:
        weightsConverged = false;  // for GM, there is no clear binary convergence for the weights
        break;
      case GncLossType::TLS:
        weightsConverged = true;
        for (size_t i = 0; i < weights.size(); i++) {
          if (std::fabs(weights[i] - std::round(weights[i]))
              > params_.weightsTol) {
            weightsConverged = false;
            break;
          }
        }
        break;
      default:
        throw std::runtime_error(
            "GncOptimizer::checkWeightsConvergence: called with unknown loss type.");
    }
    if (weightsConverged
        && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
      std::cout << "weightsConverged = true " << std::endl;
    return weightsConverged;
  }

  /// Check for convergence between consecutive GNC iterations.
  bool checkConvergence(const double mu, const Vector& weights,
                        const double cost, const double prev_cost) const {
    return checkCostConvergence(cost, prev_cost)
        || checkWeightsConvergence(weights) || checkMuConvergence(mu);
  }

  /// Create a graph where each factor is weighted by the GNC weights.
  NonlinearFactorGraph makeWeightedGraph(const Vector& weights) const {
    // Each factor is cloned with its noise model scaled by the corresponding weight.
    NonlinearFactorGraph newGraph;
    newGraph.resize(nfg_.size());
    for (size_t i = 0; i < nfg_.size(); i++) {
      if (nfg_[i]) {
        auto factor = boost::dynamic_pointer_cast<
            NoiseModelFactor>(nfg_[i]);
        auto noiseModel =
            boost::dynamic_pointer_cast<noiseModel::Gaussian>(
                factor->noiseModel());
        if (noiseModel) {
          Matrix newInfo = weights[i] * noiseModel->information();
          auto newNoiseModel = noiseModel::Gaussian::Information(newInfo);
          newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel);
        } else {
          throw std::runtime_error(
              "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model.");
        }
      }
    }
    return newGraph;
  }

  /// Calculate the GNC weights.
  Vector calculateWeights(const Values& currentEstimate, const double mu) {
    Vector weights = Vector::Ones(nfg_.size());

    // do not update the weights that the user has decided are known inliers
    std::vector<size_t> allWeights;
    for (size_t k = 0; k < nfg_.size(); k++) {
      allWeights.push_back(k);
    }
    std::vector<size_t> unknownWeights;
    std::set_difference(allWeights.begin(), allWeights.end(),
                        params_.knownInliers.begin(),
                        params_.knownInliers.end(),
                        std::inserter(unknownWeights, unknownWeights.begin()));

    // update the weights of the measurements that are not known inliers
    switch (params_.lossType) {
      case GncLossType::GM: {  // use eq (12) in the GNC paper
        for (size_t k : unknownWeights) {
          if (nfg_[k]) {
            double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
            weights[k] = std::pow(
                (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2);
          }
        }
        return weights;
      }
      case GncLossType::TLS: {  // use eq (14) in the GNC paper
        double upperbound = (mu + 1) / mu * params_.barcSq;
        double lowerbound = mu / (mu + 1) * params_.barcSq;
        for (size_t k : unknownWeights) {
          if (nfg_[k]) {
            double u2_k = nfg_[k]->error(currentEstimate);  // squared (and whitened) residual
            if (u2_k >= upperbound) {
              weights[k] = 0;
            } else if (u2_k <= lowerbound) {
              weights[k] = 1;
            } else {
              weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k)
                  - mu;
            }
          }
        }
        return weights;
      }
      default:
        throw std::runtime_error(
            "GncOptimizer::calculateWeights: called with unknown loss type.");
    }
  }
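
  // A worked instance of the weight updates above, with mu = 1, barcSq = 1 and
  // a whitened squared residual u2_k = 50 (the outlier in the unit tests):
  //   GM:  w = ((1 * 1) / (50 + 1 * 1))^2 = (1/51)^2 ~= 3.8e-4
  //   TLS: upperbound = (1 + 1) / 1 * 1 = 2, and u2_k = 50 >= 2, so w = 0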
|
||||||
|
};
|
||||||
|
|
||||||
|
}
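
A minimal usage sketch of the optimizer above, based on the unit tests later in this commit; `graph` and `initial` are assumed to be supplied by the caller, with Gaussian (or robust-wrapped Gaussian) noise models:

#include <gtsam/nonlinear/GncOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

gtsam::Values robustSolve(const gtsam::NonlinearFactorGraph& graph,
                          const gtsam::Values& initial) {
  using namespace gtsam;
  GncParams<LevenbergMarquardtParams> params;
  params.setLossType(GncLossType::TLS);  // truncated least squares (the default)
  GncOptimizer<GncParams<LevenbergMarquardtParams>> gnc(graph, initial, params);
  Values estimate = gnc.optimize();
  // gnc.getWeights(): per-factor weights, ~1 for inliers and ~0 for outliers (TLS)
  return estimate;
}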

@@ -0,0 +1,164 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file GncParams.h
 * @brief The GncParams class
 * @author Jingnan Shi
 * @author Luca Carlone
 * @author Frank Dellaert
 *
 * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception:
 * From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf)
 *
 * See also:
 * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications",
 * arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020.
 */

#pragma once

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>

namespace gtsam {

/* ************************************************************************* */
/// Choice of robust loss function for GNC.
enum GncLossType {
  GM /* Geman-McClure */,
  TLS /* Truncated least squares */
};

template<class BaseOptimizerParameters>
class GncParams {
 public:
  /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer.
  typedef typename BaseOptimizerParameters::OptimizerType OptimizerType;

  /// Verbosity levels.
  enum Verbosity {
    SILENT = 0,
    SUMMARY,
    VALUES
  };

  /// Constructor.
  GncParams(const BaseOptimizerParameters& baseOptimizerParams)
      : baseOptimizerParams(baseOptimizerParams) {
  }

  /// Default constructor.
  GncParams()
      : baseOptimizerParams() {
  }

  /// GNC parameters.
  BaseOptimizerParameters baseOptimizerParams;  ///< Optimization parameters used to solve the weighted least-squares problem at each GNC iteration
  /// Any other GNC-specific parameters:
  GncLossType lossType = TLS;  ///< Default loss
  size_t maxIterations = 100;  ///< Maximum number of iterations
  double barcSq = 1.0;  ///< A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance.
  double muStep = 1.4;  ///< Multiplicative factor to reduce/increase mu in GNC
  double relativeCostTol = 1e-5;  ///< If the relative cost change is below this threshold, stop iterating
  double weightsTol = 1e-4;  ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS)
  Verbosity verbosity = SILENT;  ///< Verbosity level
  std::vector<size_t> knownInliers = std::vector<size_t>();  ///< Slots in the factor graph corresponding to measurements that we know are inliers

  /// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType).
  void setLossType(const GncLossType type) {
    lossType = type;
  }

  /// Set the maximum number of iterations in GNC (changing the max number of iterations might lead to less accurate solutions and is not recommended).
  void setMaxIterations(const size_t maxIter) {
    std::cout
        << "setMaxIterations: changing the max number of iterations might lead to less accurate solutions and is not recommended! "
        << std::endl;
    maxIterations = maxIter;
  }

  /** Set the maximum weighted residual error for an inlier. For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega,
   * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier.
   * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq.
   * Assuming an isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq.
   * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2.
   */
  void setInlierCostThreshold(const double inth) {
    barcSq = inth;
  }
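
  // A worked instance of the bound above: with an isotropic sigma = 0.1 and
  // the default barcSq = 1, a measurement is an inlier when
  // || r(x) ||^2 <= 2 * 1 * 0.1^2 = 0.02, i.e. || r(x) || <= ~0.14.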

  /// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep.
  void setMuStep(const double step) {
    muStep = step;
  }

  /// Set the maximum relative cost change to stop iterating.
  void setRelativeCostTol(double value) {
    relativeCostTol = value;
  }

  /// Set the maximum difference between the weights and their rounding in {0,1} to stop iterating.
  void setWeightsTol(double value) {
    weightsTol = value;
  }

  /// Set the verbosity level.
  void setVerbosityGNC(const Verbosity value) {
    verbosity = value;
  }

  /** (Optional) Provide a vector of measurements that must be considered inliers. The entries in the vector
   * correspond to the slots in the factor graph. For instance, if you have a nonlinear factor graph nfg,
   * and you provide knownIn = {0, 2, 15}, GNC will not apply outlier rejection to nfg[0], nfg[2], and nfg[15].
   * This functionality is commonly used in SLAM when one may assume the odometry is outlier-free, and
   * only apply GNC to prune outliers from the loop closures.
   */
  void setKnownInliers(const std::vector<size_t>& knownIn) {
    for (size_t i = 0; i < knownIn.size(); i++)
      knownInliers.push_back(knownIn[i]);
  }

  /// Equals.
  bool equals(const GncParams& other, double tol = 1e-9) const {
    return baseOptimizerParams.equals(other.baseOptimizerParams)
        && lossType == other.lossType && maxIterations == other.maxIterations
        && std::fabs(barcSq - other.barcSq) <= tol
        && std::fabs(muStep - other.muStep) <= tol
        && verbosity == other.verbosity && knownInliers == other.knownInliers;
  }

  /// Print.
  void print(const std::string& str) const {
    std::cout << str << "\n";
    switch (lossType) {
      case GM:
        std::cout << "lossType: Geman McClure" << "\n";
        break;
      case TLS:
        std::cout << "lossType: Truncated Least-squares" << "\n";
        break;
      default:
        throw std::runtime_error("GncParams::print: unknown loss type.");
    }
    std::cout << "maxIterations: " << maxIterations << "\n";
    std::cout << "barcSq: " << barcSq << "\n";
    std::cout << "muStep: " << muStep << "\n";
    std::cout << "relativeCostTol: " << relativeCostTol << "\n";
    std::cout << "weightsTol: " << weightsTol << "\n";
    std::cout << "verbosity: " << verbosity << "\n";
    for (size_t i = 0; i < knownInliers.size(); i++)
      std::cout << "knownInliers: " << knownInliers[i] << "\n";
    baseOptimizerParams.print(str);
  }
};

}  // namespace gtsam
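
A short configuration sketch for the parameters above, mirroring how the unit tests in this commit set them up (the specific values are illustrative):

GaussNewtonParams gnParams;                        // inner solver parameters
GncParams<GaussNewtonParams> gncParams(gnParams);  // OptimizerType resolves to GaussNewtonOptimizer
gncParams.setLossType(GncLossType::GM);            // Geman-McClure surrogate
gncParams.setInlierCostThreshold(0.5);             // inlier if factor.error() <= barcSq = 0.5
gncParams.setKnownInliers({0, 1, 2});              // never down-weight these factor slots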

@@ -25,6 +25,8 @@
namespace gtsam {

class LevenbergMarquardtOptimizer;

/** Parameters for Levenberg-Marquardt optimization. Note that this parameter
 * class inherits from NonlinearOptimizerParams, which specifies the parameters
 * common to all nonlinear optimization algorithms. This class also contains

@@ -40,6 +42,7 @@ public:
  static VerbosityLM verbosityLMTranslator(const std::string &s);
  static std::string verbosityLMTranslator(VerbosityLM value);
  using OptimizerType = LevenbergMarquardtOptimizer;

public:
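
Together with the GaussNewtonParams change above, every base-optimizer parameter class now exposes an OptimizerType alias; this is the trait GncParams and GncOptimizer use to recover the solver type at compile time. A sketch of the trait in isolation:

#include <type_traits>

static_assert(std::is_same<LevenbergMarquardtParams::OptimizerType,
                           LevenbergMarquardtOptimizer>::value,
              "a params type maps to its optimizer type");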
@@ -76,6 +76,14 @@ bool NoiseModelFactor::equals(const NonlinearFactor& f, double tol) const {
      && noiseModel_->equals(*e->noiseModel_, tol)));
}

/* ************************************************************************* */
NoiseModelFactor::shared_ptr NoiseModelFactor::cloneWithNewNoiseModel(
    const SharedNoiseModel newNoise) const {
  NoiseModelFactor::shared_ptr new_factor =
      boost::dynamic_pointer_cast<NoiseModelFactor>(clone());
  new_factor->noiseModel_ = newNoise;
  return new_factor;
}

/* ************************************************************************* */
static void check(const SharedNoiseModel& noiseModel, size_t m) {
  if (noiseModel && m != noiseModel->dim())
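
A small usage sketch of the new method (the `graph` slot and the sigma value are placeholders):

// Clone a factor, keeping its keys and measurement but swapping the noise model.
auto factor = boost::dynamic_pointer_cast<NoiseModelFactor>(graph[0]);
auto reweighted = factor->cloneWithNewNoiseModel(
    noiseModel::Isotropic::Sigma(factor->dim(), 10.0));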

@@ -244,6 +244,12 @@ public:
 */
boost::shared_ptr<GaussianFactor> linearize(const Values& x) const override;

/**
 * Creates a shared_ptr clone of the factor with a new noise model.
 */
shared_ptr cloneWithNewNoiseModel(const SharedNoiseModel newNoise) const;

private:
/** Serialization function */
friend class boost::serialization::access;

@@ -113,6 +113,17 @@ public:
virtual void print(const std::string& str = "") const;

bool equals(const NonlinearOptimizerParams& other, double tol = 1e-9) const {
  return maxIterations == other.getMaxIterations()
      && std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol
      && std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol
      && std::abs(errorTol - other.getErrorTol()) <= tol
      && verbosityTranslator(verbosity) == other.getVerbosity();
  // && orderingType.equals(other.getOrderingType())
  // && linearSolverType == other.getLinearSolverType();
  // TODO: check ordering, iterativeParams, and iterationsHook
}

inline bool isMultifrontal() const {
  return (linearSolverType == MULTIFRONTAL_CHOLESKY)
      || (linearSolverType == MULTIFRONTAL_QR);

@@ -342,10 +342,25 @@ struct UnaryFactor: public gtsam::NoiseModelFactor1<Point2> {
    return (h(x) - z_);
  }

  gtsam::NonlinearFactor::shared_ptr clone() const override {
    return boost::static_pointer_cast<gtsam::NonlinearFactor>(
        gtsam::NonlinearFactor::shared_ptr(new UnaryFactor(*this)));
  }
};

}

/* ************************************************************************* */
inline NonlinearFactorGraph nonlinearFactorGraphWithGivenSigma(const double sigma) {
  using symbol_shorthand::X;
  using symbol_shorthand::L;
  boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
  Point2 z(1.0, 0.0);
  boost::shared_ptr<smallOptimize::UnaryFactor> factor(
      new smallOptimize::UnaryFactor(z, noiseModel::Isotropic::Sigma(2, sigma), X(1)));
  fg->push_back(factor);
  return *fg;
}

/* ************************************************************************* */
inline boost::shared_ptr<const NonlinearFactorGraph> sharedReallyNonlinearFactorGraph() {
  using symbol_shorthand::X;

@@ -363,6 +378,54 @@ inline NonlinearFactorGraph createReallyNonlinearFactorGraph() {
  return *sharedReallyNonlinearFactorGraph();
}

/* ************************************************************************* */
inline NonlinearFactorGraph sharedNonRobustFactorGraphWithOutliers() {
  using symbol_shorthand::X;
  boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
  Point2 z(0.0, 0.0);
  double sigma = 0.1;

  boost::shared_ptr<PriorFactor<Point2>> factor(
      new PriorFactor<Point2>(X(1), z, noiseModel::Isotropic::Sigma(2, sigma)));
  // 3 noiseless inliers
  fg->push_back(factor);
  fg->push_back(factor);
  fg->push_back(factor);

  // 1 outlier
  Point2 z_out(1.0, 0.0);
  boost::shared_ptr<PriorFactor<Point2>> factor_out(
      new PriorFactor<Point2>(X(1), z_out, noiseModel::Isotropic::Sigma(2, sigma)));
  fg->push_back(factor_out);

  return *fg;
}

/* ************************************************************************* */
inline NonlinearFactorGraph sharedRobustFactorGraphWithOutliers() {
  using symbol_shorthand::X;
  boost::shared_ptr<NonlinearFactorGraph> fg(new NonlinearFactorGraph);
  Point2 z(0.0, 0.0);
  double sigma = 0.1;
  auto gmNoise = noiseModel::Robust::Create(
      noiseModel::mEstimator::GemanMcClure::Create(1.0),
      noiseModel::Isotropic::Sigma(2, sigma));
  boost::shared_ptr<PriorFactor<Point2>> factor(
      new PriorFactor<Point2>(X(1), z, gmNoise));
  // 3 noiseless inliers
  fg->push_back(factor);
  fg->push_back(factor);
  fg->push_back(factor);

  // 1 outlier
  Point2 z_out(1.0, 0.0);
  boost::shared_ptr<PriorFactor<Point2>> factor_out(
      new PriorFactor<Point2>(X(1), z_out, gmNoise));
  fg->push_back(factor_out);

  return *fg;
}

/* ************************************************************************* */
inline std::pair<NonlinearFactorGraph, Values> createNonlinearSmoother(int T) {
  using namespace impl;
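
For reference, the single outlier above has whitened error 0.5 * (1 / 0.1^2) * || (1,0) - (0,0) ||^2 = 50 when evaluated at the origin; the GNC weight tests below rely on this value.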

@@ -0,0 +1,640 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testGncOptimizer.cpp
 * @brief Unit tests for GncOptimizer class
 * @author Jingnan Shi
 * @author Luca Carlone
 * @author Frank Dellaert
 *
 * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated
 * Non-Convexity for Robust Spatial Perception: From Non-Minimal Solvers to
 * Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version:
 * https://arxiv.org/pdf/1909.08605.pdf)
 *
 * See also:
 * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness,
 * Minimally-Tuned Algorithms, and Applications", arxiv:
 * https://arxiv.org/pdf/2007.15109.pdf, 2020.
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/nonlinear/GncOptimizer.h>
#include <gtsam/nonlinear/LinearContainerFactor.h>
#include <gtsam/slam/dataset.h>
#include <tests/smallExample.h>

using namespace std;
using namespace gtsam;

using symbol_shorthand::L;
using symbol_shorthand::X;
static double tol = 1e-7;

/* ************************************************************************* */
TEST(GncOptimizer, gncParamsConstructor) {
  // check that the params are correctly parsed
  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams1(lmParams);
  CHECK(lmParams.equals(gncParams1.baseOptimizerParams));

  // check also the default constructor
  GncParams<LevenbergMarquardtParams> gncParams1b;
  CHECK(lmParams.equals(gncParams1b.baseOptimizerParams));

  // and check that the params become different if we change lmParams
  lmParams.setVerbosity("DELTA");
  CHECK(!lmParams.equals(gncParams1.baseOptimizerParams));

  // and the same for GN
  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams2(gnParams);
  CHECK(gnParams.equals(gncParams2.baseOptimizerParams));

  // check the default constructor
  GncParams<GaussNewtonParams> gncParams2b;
  CHECK(gnParams.equals(gncParams2b.baseOptimizerParams));

  // change something at the gncParams level
  GncParams<GaussNewtonParams> gncParams2c(gncParams2b);
  gncParams2c.setLossType(GncLossType::GM);
  CHECK(!gncParams2c.equals(gncParams2b));
}

/* ************************************************************************* */
TEST(GncOptimizer, gncConstructor) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();  // just a unary factor
                                                          // on a 2D point

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  CHECK(gnc.getFactors().equals(fg));
  CHECK(gnc.getState().equals(initial));
  CHECK(gnc.getParams().equals(gncParams));
}

/* ************************************************************************* */
TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();
  // same graph, but with a robust noise model
  auto fg_robust = example::sharedRobustFactorGraphWithOutliers();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg_robust,
                                                               initial,
                                                               gncParams);

  // make sure that, during parsing, the graph is transformed into one without
  // the robust loss
  CHECK(fg.equals(gnc.getFactors()));
}

/* ************************************************************************* */
TEST(GncOptimizer, initializeMu) {
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  // testing GM mu initialization
  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setLossType(GncLossType::GM);
  auto gnc_gm = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                  gncParams);
  // according to Remark 5 in the GNC paper: mu0 = 2 * rmax^2 / barcSq
  // (barcSq = 1 in this example)
  EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3);

  // testing TLS mu initialization
  gncParams.setLossType(GncLossType::TLS);
  auto gnc_tls = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                   gncParams);
  // according to Remark 5 in the GNC paper: mu0 = barcSq / (2 * rmax^2 - barcSq)
  // (barcSq = 1 in this example)
  EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3);
}

/* ************************************************************************* */
TEST(GncOptimizer, updateMuGM) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setLossType(GncLossType::GM);
  gncParams.setMuStep(1.4);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  double mu = 5.0;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol);

  // check that it correctly saturates to 1 for GM
  mu = 1.2;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), 1.0, tol);
}

/* ************************************************************************* */
TEST(GncOptimizer, updateMuTLS) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setMuStep(1.4);
  gncParams.setLossType(GncLossType::TLS);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  double mu = 5.0;
  EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu * 1.4, tol);
}

/* ************************************************************************* */
TEST(GncOptimizer, checkMuConvergence) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::GM);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double mu = 1.0;
    CHECK(gnc.checkMuConvergence(mu));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double mu = 1.0;
    CHECK(!gnc.checkMuConvergence(mu));  // always false for TLS
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, checkCostConvergence) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setRelativeCostTol(0.49);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double prev_cost = 1.0;
    double cost = 0.5;
    // relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false
    CHECK(!gnc.checkCostConvergence(cost, prev_cost));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setRelativeCostTol(0.51);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    double prev_cost = 1.0;
    double cost = 0.5;
    // relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true
    CHECK(gnc.checkCostConvergence(cost, prev_cost));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, checkWeightsConvergence) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::GM);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    CHECK(!gnc.checkWeightsConvergence(weights));  // always false for GM
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    // weights are binary, so checkWeightsConvergence = true
    CHECK(gnc.checkWeightsConvergence(weights));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    weights[0] = 0.9;  // more than weightsTol = 1e-4 away from 1, hence checkWeightsConvergence = false
    CHECK(!gnc.checkWeightsConvergence(weights));
  }
  {
    GncParams<LevenbergMarquardtParams> gncParams;
    gncParams.setLossType(GncLossType::TLS);
    gncParams.setWeightsTol(0.1);
    auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                                 gncParams);

    Vector weights = Vector::Ones(fg.size());
    weights[0] = 0.9;  // exactly weightsTol = 0.1 away from 1, hence checkWeightsConvergence = true
    CHECK(gnc.checkWeightsConvergence(weights));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, checkConvergenceTLS) {
  // the graph has to have Gaussian noise models!
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  GncParams<LevenbergMarquardtParams> gncParams;
  gncParams.setRelativeCostTol(1e-5);
  gncParams.setLossType(GncLossType::TLS);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  CHECK(gnc.checkCostConvergence(1.0, 1.0));
  CHECK(!gnc.checkCostConvergence(1.0, 2.0));
}

/* ************************************************************************* */
TEST(GncOptimizer, calculateWeightsGM) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(0, 0);
  Values initial;
  initial.insert(X(1), p0);

  // we have 4 factors, 3 with zero error (inliers), and 1 with error
  // 50 = 0.5 * 1/sigma^2 * || [1;0] - [0;0] ||^2 (outlier)
  Vector weights_expected = Vector::Zero(4);
  weights_expected[0] = 1.0;  // zero error
  weights_expected[1] = 1.0;  // zero error
  weights_expected[2] = 1.0;  // zero error
  weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2);  // outlier, error = 50

  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  gncParams.setLossType(GncLossType::GM);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  double mu = 1.0;
  Vector weights_actual = gnc.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));

  mu = 2.0;
  double barcSq = 5.0;
  weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2);  // outlier, error = 50
  gncParams.setInlierCostThreshold(barcSq);
  auto gnc2 = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                         gncParams);
  weights_actual = gnc2.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));
}

/* ************************************************************************* */
TEST(GncOptimizer, calculateWeightsTLS) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(0, 0);
  Values initial;
  initial.insert(X(1), p0);

  // we have 4 factors, 3 with zero error (inliers), and 1 with nonzero error
  Vector weights_expected = Vector::Zero(4);
  weights_expected[0] = 1.0;  // zero error
  weights_expected[1] = 1.0;  // zero error
  weights_expected[2] = 1.0;  // zero error
  weights_expected[3] = 0;  // outlier

  GaussNewtonParams gnParams;
  GncParams<GaussNewtonParams> gncParams(gnParams);
  gncParams.setLossType(GncLossType::TLS);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  double mu = 1.0;
  Vector weights_actual = gnc.calculateWeights(initial, mu);
  CHECK(assert_equal(weights_expected, weights_actual, tol));
}

/* ************************************************************************* */
TEST(GncOptimizer, calculateWeightsTLS2) {
  // create values
  Point2 x_val(0.0, 0.0);
  Point2 x_prior(1.0, 0.0);
  Values initial;
  initial.insert(X(1), x_val);

  // create a very simple factor graph with a single factor
  // 0.5 * 1/sigma^2 * || x - [1;0] ||^2
  double sigma = 1;
  SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma));
  NonlinearFactorGraph nfg;
  nfg.add(PriorFactor<Point2>(X(1), x_prior, noise));

  // cost of the factor:
  DOUBLES_EQUAL(0.5 * 1 / (sigma * sigma), nfg.error(initial), tol);

  // check that the TLS weights are correct: CASE 1: residual below barcSq
  {
    // expected:
    Vector weights_expected = Vector::Zero(1);
    weights_expected[0] = 1.0;  // inlier
    // actual:
    GaussNewtonParams gnParams;
    GncParams<GaussNewtonParams> gncParams(gnParams);
    gncParams.setLossType(GncLossType::TLS);
    gncParams.setInlierCostThreshold(0.51);  // inlier threshold slightly larger than 0.5: the measurement is an inlier
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                          gncParams);
    double mu = 1e6;
    Vector weights_actual = gnc.calculateWeights(initial, mu);
    CHECK(assert_equal(weights_expected, weights_actual, tol));
  }
  // check that the TLS weights are correct: CASE 2: residual above barcSq
  {
    // expected:
    Vector weights_expected = Vector::Zero(1);
    weights_expected[0] = 0.0;  // outlier
    // actual:
    GaussNewtonParams gnParams;
    GncParams<GaussNewtonParams> gncParams(gnParams);
    gncParams.setLossType(GncLossType::TLS);
    gncParams.setInlierCostThreshold(0.49);  // inlier threshold slightly below 0.5: the measurement is an outlier
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                          gncParams);
    double mu = 1e6;  // a very large mu recovers the original TLS cost
    Vector weights_actual = gnc.calculateWeights(initial, mu);
    CHECK(assert_equal(weights_expected, weights_actual, tol));
  }
  // check that the TLS weights are correct: CASE 3: residual exactly at barcSq
  {
    // expected:
    Vector weights_expected = Vector::Zero(1);
    weights_expected[0] = 0.5;  // undecided
    // actual:
    GaussNewtonParams gnParams;
    GncParams<GaussNewtonParams> gncParams(gnParams);
    gncParams.setLossType(GncLossType::TLS);
    gncParams.setInlierCostThreshold(0.5);  // inlier threshold equal to the residual: the weight is undecided (0.5)
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial,
                                                          gncParams);
    double mu = 1e6;  // a very large mu recovers the original TLS cost
    Vector weights_actual = gnc.calculateWeights(initial, mu);
    CHECK(assert_equal(weights_expected, weights_actual, 1e-5));
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, makeWeightedGraph) {
  // create the original factor
  double sigma1 = 0.1;
  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(
      sigma1);

  // create expected
  double sigma2 = 10;
  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(
      sigma2);

  // create weights
  Vector weights = Vector::Ones(1);  // original info: 1/0.1^2 = 100; new info: 1/10^2 = 0.01; the ratio is 1e-4
  weights[0] = 1e-4;

  // create actual
  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial,
                                                               gncParams);
  NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights);

  // check it's all good
  CHECK(assert_equal(expected, actual));
}

/* ************************************************************************* */
TEST(GncOptimizer, optimizeSimple) {
  auto fg = example::createReallyNonlinearFactorGraph();

  Point2 p0(3, 3);
  Values initial;
  initial.insert(X(1), p0);

  LevenbergMarquardtParams lmParams;
  GncParams<LevenbergMarquardtParams> gncParams(lmParams);
  auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial,
                                                               gncParams);

  Values actual = gnc.optimize();
  DOUBLES_EQUAL(0, fg.error(actual), tol);
}

/* ************************************************************************* */
TEST(GncOptimizer, optimize) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(1, 0);
  Values initial;
  initial.insert(X(1), p0);

  // try with a nonrobust cost function and standard GN
  GaussNewtonParams gnParams;
  GaussNewtonOptimizer gn(fg, initial, gnParams);
  Values gn_results = gn.optimize();
  // converges to an incorrect point due to the lack of robustness to an
  // outlier; the ideal solution is Point2(0,0)
  CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3));

  // try with a robust loss function and standard GN
  auto fg_robust = example::sharedRobustFactorGraphWithOutliers();  // same as fg, but with
                                                                    // factors wrapped in
                                                                    // Geman-McClure losses
  GaussNewtonOptimizer gn2(fg_robust, initial, gnParams);
  Values gn2_results = gn2.optimize();
  // converges to an incorrect point, this time due to the nonconvexity of the loss
  CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3));

  // ... but graduated non-convexity ensures both robustness and convergence in
  // the face of nonconvexity
  GncParams<GaussNewtonParams> gncParams(gnParams);
  // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams);
  Values gnc_result = gnc.optimize();
  CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));
}

/* ************************************************************************* */
TEST(GncOptimizer, optimizeWithKnownInliers) {
  auto fg = example::sharedNonRobustFactorGraphWithOutliers();

  Point2 p0(1, 0);
  Values initial;
  initial.insert(X(1), p0);

  std::vector<size_t> knownInliers;
  knownInliers.push_back(0);
  knownInliers.push_back(1);
  knownInliers.push_back(2);

  // graduated non-convexity with known inliers
  {
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::GM);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);

    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check that the weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
  }
  {
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::TLS);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);

    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check that the weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
    DOUBLES_EQUAL(0.0, finalWeights[3], tol);
  }
  {
    // if we set the threshold large, all measurements are inliers
    GncParams<GaussNewtonParams> gncParams;
    gncParams.setKnownInliers(knownInliers);
    gncParams.setLossType(GncLossType::TLS);
    // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES);
    gncParams.setInlierCostThreshold(100.0);
    auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial,
                                                          gncParams);

    Values gnc_result = gnc.optimize();
    CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3));

    // check that the weights were actually fixed:
    Vector finalWeights = gnc.getWeights();
    DOUBLES_EQUAL(1.0, finalWeights[0], tol);
    DOUBLES_EQUAL(1.0, finalWeights[1], tol);
    DOUBLES_EQUAL(1.0, finalWeights[2], tol);
    DOUBLES_EQUAL(1.0, finalWeights[3], tol);
  }
}

/* ************************************************************************* */
TEST(GncOptimizer, optimizeSmallPoseGraph) {
  /// load a small pose graph
  const string filename = findExampleDataFile("w100.graph");
  NonlinearFactorGraph::shared_ptr graph;
  Values::shared_ptr initial;
  boost::tie(graph, initial) = load2D(filename);
  // add a Gaussian prior on the first pose
  Pose2 priorMean(0.0, 0.0, 0.0);  // prior at origin
  SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas(
      Vector3(0.01, 0.01, 0.01));
  graph->addPrior(0, priorMean, priorNoise);

  /// get the expected values by optimizing the outlier-free graph
  Values expected = LevenbergMarquardtOptimizer(*graph, *initial).optimize();

  // add a few outliers
  SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas(
      Vector3(0.1, 0.1, 0.01));
  graph->push_back(BetweenFactor<Pose2>(90, 50, Pose2(), betweenNoise));  // an arbitrary and incorrect between factor

  /// optimize the graph that now contains an outlier
  Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial)
      .optimize();
  // as expected, the following test fails due to the presence of an outlier!
  // CHECK(assert_equal(expected, expectedWithOutliers, 1e-3));

  // GNC
  // Note: in difficult instances, we set the odometry measurements to be
  // inliers, but this problem is simple enough to succeed even without that
  // assumption (std::vector<size_t> knownInliers is left empty here)
  GncParams<GaussNewtonParams> gncParams;
  auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(*graph, *initial,
                                                        gncParams);
  Values actual = gnc.optimize();

  // compare
  CHECK(assert_equal(expected, actual, 1e-3));  // yay! we are robust to outliers!
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

@@ -233,6 +233,26 @@ TEST( NonlinearFactor, linearize_constraint2 )
  CHECK(assert_equal((const GaussianFactor&)expected, *actual));
}

/* ************************************************************************* */
TEST( NonlinearFactor, cloneWithNewNoiseModel )
{
  // create the original factor
  double sigma1 = 0.1;
  NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(sigma1);

  // create expected
  double sigma2 = 10;
  NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(sigma2);

  // create actual
  NonlinearFactorGraph actual;
  SharedNoiseModel noise2 = noiseModel::Isotropic::Sigma(2, sigma2);
  actual.push_back(boost::dynamic_pointer_cast<NoiseModelFactor>(nfg[0])->cloneWithNewNoiseModel(noise2));

  // check it's all good
  CHECK(assert_equal(expected, actual));
}

/* ************************************************************************* */
class TestFactor4 : public NoiseModelFactor4<double, double, double, double> {
 public:

@@ -48,6 +48,19 @@ const double tol = 1e-5;
using symbol_shorthand::X;
using symbol_shorthand::L;

/* ************************************************************************* */
TEST( NonlinearOptimizer, paramsEquals )
{
  // default constructors lead to two identical params
  GaussNewtonParams gnParams1;
  GaussNewtonParams gnParams2;
  CHECK(gnParams1.equals(gnParams2));

  // but the params become different if we change something in gnParams2
  gnParams2.setVerbosity("DELTA");
  CHECK(!gnParams1.equals(gnParams2));
}

/* ************************************************************************* */
TEST( NonlinearOptimizer, iterateLM )
{