Merge pull request #1035 from borglab/fix/gncOptimizer

commit d12dd55c55
@@ -108,17 +108,16 @@ std::pair<NonlinearFactorGraph, Values> triangulationGraph(
     const std::vector<Pose3>& poses, boost::shared_ptr<CALIBRATION> sharedCal,
     const Point2Vector& measurements, Key landmarkKey,
     const Point3& initialEstimate,
-    const SharedNoiseModel& model = nullptr) {
+    const SharedNoiseModel& model = noiseModel::Unit::Create(2)) {
   Values values;
   values.insert(landmarkKey, initialEstimate); // Initial landmark value
   NonlinearFactorGraph graph;
-  static SharedNoiseModel unit2(noiseModel::Unit::Create(2));
   for (size_t i = 0; i < measurements.size(); i++) {
     const Pose3& pose_i = poses[i];
     typedef PinholePose<CALIBRATION> Camera;
     Camera camera_i(pose_i, sharedCal);
     graph.emplace_shared<TriangulationFactor<Camera> > //
-        (camera_i, measurements[i], model? model : unit2, landmarkKey);
+        (camera_i, measurements[i], model, landmarkKey);
   }
   return std::make_pair(graph, values);
 }
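Note: the hunk above replaces the `nullptr` default (plus the static `unit2` fallback inside the loop) with a `noiseModel::Unit::Create(2)` default argument, so every call site receives a valid model. A minimal sketch of the same pattern, not taken from the PR and using a hypothetical helper name, assuming GTSAM's noise-model API:

#include <gtsam/linear/NoiseModel.h>

using gtsam::SharedNoiseModel;
namespace noiseModel = gtsam::noiseModel;

// Hypothetical helper mirroring the signature change above: the default is a
// concrete Unit(2) model, so no "model ? model : unit2" check is needed inside.
void describeModel(const SharedNoiseModel& model = noiseModel::Unit::Create(2)) {
  model->print("noise model used: ");
}

int main() {
  describeModel();                                      // uses the Unit(2) default
  describeModel(noiseModel::Isotropic::Sigma(2, 0.5));  // an explicit model still wins
  return 0;
}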
@@ -207,9 +207,11 @@ class GTSAM_EXPORT GncOptimizer {
         std::cout << "GNC Optimizer stopped because all measurements are already known to be inliers or outliers"
                   << std::endl;
       }
+      if (params_.verbosity >= GncParameters::Verbosity::MU) {
+        std::cout << "mu: " << mu << std::endl;
+      }
       if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
         result.print("result\n");
-        std::cout << "mu: " << mu << std::endl;
       }
       return result;
     }
@@ -218,12 +220,16 @@ class GTSAM_EXPORT GncOptimizer {
     for (iter = 0; iter < params_.maxIterations; iter++) {

       // display info
-      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
+      if (params_.verbosity >= GncParameters::Verbosity::MU) {
         std::cout << "iter: " << iter << std::endl;
-        result.print("result\n");
         std::cout << "mu: " << mu << std::endl;
+      }
+      if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
         std::cout << "weights: " << weights_ << std::endl;
+      }
+      if (params_.verbosity >= GncParameters::Verbosity::VALUES) {
+        result.print("result\n");
       }
       // weights update
       weights_ = calculateWeights(result, mu);
@@ -255,10 +261,12 @@ class GTSAM_EXPORT GncOptimizer {
     if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) {
       std::cout << "final iterations: " << iter << std::endl;
       std::cout << "final mu: " << mu << std::endl;
-      std::cout << "final weights: " << weights_ << std::endl;
+      std::cout << "previous cost: " << prev_cost << std::endl;
+      std::cout << "current cost: " << cost << std::endl;
     }
+    if (params_.verbosity >= GncParameters::Verbosity::WEIGHTS) {
+      std::cout << "final weights: " << weights_ << std::endl;
+    }
     return result;
   }
@@ -293,6 +301,11 @@ class GTSAM_EXPORT GncOptimizer {
               std::min(mu_init, barcSq_[k] / (2 * rk - barcSq_[k]) ) : mu_init;
         }
       }
+      if (mu_init >= 0 && mu_init < 1e-6){
+        mu_init = 1e-6; // if mu ~ 0 (but positive), that means we have measurements with large errors,
+        // i.e., rk > barcSq_[k] and rk very large, hence we threshold to 1e-6 to avoid mu = 0
+      }
+
       return mu_init > 0 && !std::isinf(mu_init) ? mu_init : -1; // if mu <= 0 or mu = inf, return -1,
       // which leads to termination of the main gnc loop. In this case, all residuals are already below the threshold
       // and there is no need to robustify (TLS = least squares)
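Note: for TLS the initial mu is the per-measurement soft threshold barcSq_[k] / (2 * rk - barcSq_[k]) taken at its minimum; the new clamp keeps mu away from zero when some residuals are very large. A standalone sketch of the logic (not the library code; plain std::vector inputs instead of the factor graph):

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

// residuals: per-measurement errors rk; barcSq: per-measurement inlier thresholds.
double initializeMuTLS(const std::vector<double>& residuals,
                       const std::vector<double>& barcSq) {
  double mu_init = std::numeric_limits<double>::infinity();
  for (size_t k = 0; k < residuals.size(); ++k) {
    const double rk = residuals[k];
    if (barcSq[k] > 0 && (2 * rk - barcSq[k]) > 0)  // apply soft thresholding only if positive
      mu_init = std::min(mu_init, barcSq[k] / (2 * rk - barcSq[k]));
  }
  if (mu_init >= 0 && mu_init < 1e-6)
    mu_init = 1e-6;  // very large residuals would otherwise drive mu to 0
  // mu <= 0 or mu = inf: all residuals already below their thresholds, so return -1
  // to let the caller terminate the GNC loop (TLS reduces to least squares).
  return (mu_init > 0 && !std::isinf(mu_init)) ? mu_init : -1;
}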
@@ -340,8 +353,10 @@ class GTSAM_EXPORT GncOptimizer {
   bool checkCostConvergence(const double cost, const double prev_cost) const {
     bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7)
         < params_.relativeCostTol;
-    if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY)
-      std::cout << "checkCostConvergence = true " << std::endl;
+    if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY){
+      std::cout << "checkCostConvergence = true (prev. cost = " << prev_cost
+                << ", curr. cost = " << cost << ")" << std::endl;
+    }
     return costConverged;
   }
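Note: the convergence test is a relative change in cost, |cost - prev_cost| / max(prev_cost, 1e-7) < relativeCostTol. A tiny numeric sketch (the tolerance value below is an assumption for illustration, not necessarily the GncParams default):

#include <algorithm>
#include <cmath>
#include <iostream>

int main() {
  const double relativeCostTol = 1e-5;  // assumed tolerance for this example
  const double prev_cost = 1.0, cost = 0.999999;
  const bool converged =
      std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7) < relativeCostTol;
  std::cout << std::boolalpha << converged << std::endl;  // true: 1e-6 < 1e-5
  return 0;
}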
@@ -436,18 +451,16 @@ class GTSAM_EXPORT GncOptimizer {
         return weights;
       }
       case GncLossType::TLS: { // use eq (14) in GNC paper
-        double upperbound = (mu + 1) / mu * barcSq_.maxCoeff();
-        double lowerbound = mu / (mu + 1) * barcSq_.minCoeff();
         for (size_t k : unknownWeights) {
           if (nfg_[k]) {
             double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual
-            if (u2_k >= upperbound) {
+            double upperbound = (mu + 1) / mu * barcSq_[k];
+            double lowerbound = mu / (mu + 1) * barcSq_[k];
+            weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k) - mu;
+            if (u2_k >= upperbound || weights[k] < 0) {
               weights[k] = 0;
-            } else if (u2_k <= lowerbound) {
+            } else if (u2_k <= lowerbound || weights[k] > 1) {
               weights[k] = 1;
-            } else {
-              weights[k] = std::sqrt(barcSq_[k] * mu * (mu + 1) / u2_k)
-                  - mu;
             }
           }
         }
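Note: the rewritten TLS branch derives the inlier/outlier bounds from each measurement's own threshold barcSq_[k] (the old code used the global maxCoeff/minCoeff, which does not match per-measurement thresholds) and clamps the analytic weight of eq. (14) into [0, 1]. A standalone sketch of the per-measurement update (not the library code):

#include <cmath>

// u2_k: squared (whitened) residual; barcSq_k: this measurement's threshold; mu: GNC parameter.
double tlsWeight(double u2_k, double barcSq_k, double mu) {
  const double upperbound = (mu + 1) / mu * barcSq_k;  // residuals above this are outliers
  const double lowerbound = mu / (mu + 1) * barcSq_k;  // residuals below this are inliers
  double w = std::sqrt(barcSq_k * mu * (mu + 1) / u2_k) - mu;
  if (u2_k >= upperbound || w < 0) return 0.0;
  if (u2_k <= lowerbound || w > 1) return 1.0;
  return w;  // intermediate residuals get a fractional weight in (0, 1)
}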
@@ -48,6 +48,8 @@ class GTSAM_EXPORT GncParams {
   enum Verbosity {
     SILENT = 0,
     SUMMARY,
+    MU,
+    WEIGHTS,
     VALUES
   };
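Note: the two new levels slot between SUMMARY and VALUES, so requesting MU prints the iteration and mu, WEIGHTS additionally prints the weight vector, and VALUES also prints the current estimate, matching the optimize() loop above. A minimal usage sketch, assuming the usual GncOptimizer/GncParams API (graph and initial stand for an existing problem):

#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/GncOptimizer.h>

using namespace gtsam;

Values robustSolve(const NonlinearFactorGraph& graph, const Values& initial) {
  GncParams<GaussNewtonParams> params;
  // Print mu at every GNC iteration, but not the weight vector or the values.
  params.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::MU);
  GncOptimizer<GncParams<GaussNewtonParams>> optimizer(graph, initial, params);
  return optimizer.optimize();
}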
@@ -61,6 +61,12 @@ class PoseToPointFactor : public NoiseModelFactor2<POSE, POINT> {
            traits<POINT>::Equals(this->measured_, e->measured_, tol);
   }

+  /// @return a deep copy of this factor
+  gtsam::NonlinearFactor::shared_ptr clone() const override {
+    return boost::static_pointer_cast<gtsam::NonlinearFactor>(
+        gtsam::NonlinearFactor::shared_ptr(new This(*this)));
+  }
+
   /** implement functions needed to derive from Factor */

   /** vector of errors
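Note: the added clone() is the standard GTSAM deep-copy boilerplate; with it, a NonlinearFactorGraph containing PoseToPointFactor can be duplicated factor by factor. A short sketch of where that matters, assuming NonlinearFactorGraph::clone() behaves as usual (it calls clone() on each factor):

#include <gtsam/nonlinear/NonlinearFactorGraph.h>

using namespace gtsam;

// Returns a graph whose factors are independent copies of the input's factors,
// so later modifications to one graph do not alias the other.
NonlinearFactorGraph deepCopy(const NonlinearFactorGraph& graph) {
  return graph.clone();
}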