Merge pull request #269 from borglab/feature/noisemodel_rename_functions

Feature/noisemodel rename functions
release/4.3a0
Frank Dellaert 2020-07-13 13:15:37 -04:00 committed by GitHub
commit 73209e6faa
10 changed files with 281 additions and 209 deletions
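In summary: this PR renames the m-estimator method residual(double) to loss(double), renames the noise-model method distance(Vector) to squaredMahalanobisDistance(Vector) (adding mahalanobisDistance as its square root), and keeps deprecated shims behind GTSAM_ALLOW_DEPRECATED_SINCE_V4. A minimal sketch of the renamed API follows; the thresholds and vectors are illustrative values, not taken from the diff.

#include <gtsam/linear/NoiseModel.h>
#include <iostream>

int main() {
  using namespace gtsam;

  // m-estimator: weight() drives IRLS, loss() (formerly residual()) is the penalty rho(x).
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);
  std::cout << huber->weight(10.0) << "\n";  // k/|x| = 0.1345 beyond the threshold
  std::cout << huber->loss(10.0) << "\n";    // k*(|x| - k/2) = 12.545

  // Gaussian model: distance() is now squaredMahalanobisDistance().
  auto gaussian = noiseModel::Isotropic::Sigma(3, 0.5);
  Vector3 e(1.0, 0.0, 0.0);
  std::cout << gaussian->squaredMahalanobisDistance(e) << "\n";  // e'*Sigma^{-1}*e = 4
  std::cout << gaussian->mahalanobisDistance(e) << "\n";         // sqrt of the above = 2
  return 0;
}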

BIN  doc/robust.pdf (new binary file, contents not shown)

gtsam.h (18 changed lines)

@ -1459,7 +1459,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
@ -1470,7 +1470,7 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
@ -1481,7 +1481,7 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
@ -1492,7 +1492,7 @@ virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
@ -1503,7 +1503,7 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
@ -1514,7 +1514,7 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
@ -1525,7 +1525,7 @@ virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
@ -1536,7 +1536,7 @@ virtual class DCS: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
@ -1547,7 +1547,7 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
}///\namespace mEstimator


@ -515,7 +515,7 @@ Vector JacobianFactor::error_vector(const VectorValues& c) const {
double JacobianFactor::error(const VectorValues& c) const {
Vector e = unweighted_error(c);
// Use the noise model distance function to get the correct error if available.
if (model_) return 0.5 * model_->distance(e);
if (model_) return 0.5 * model_->squaredMahalanobisDistance(e);
return 0.5 * e.dot(e);
}
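For reference, the convention encoded here, with R the whitening (square-root information) matrix of the model and e the unweighted error (this restates the surrounding code, nothing new):

\[ \mathrm{error}(c) = \tfrac{1}{2}\,\lVert R\,e \rVert^2 = \tfrac{1}{2}\, e^\top \Sigma^{-1} e = \tfrac{1}{2}\,\mathrm{squaredMahalanobisDistance}(e). \]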


@ -137,12 +137,12 @@ Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
}
}
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
double Fair::weight(double distance) const {
return 1.0 / (1.0 + std::abs(distance) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
double Fair::loss(double distance) const {
const double absError = std::abs(distance);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log1p(normalizedError));
@ -170,15 +170,15 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
double Huber::weight(double distance) const {
const double absError = std::abs(distance);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
double Huber::loss(double distance) const {
const double absError = std::abs(distance);
if (absError <= k_) { // |x| <= k
return error*error / 2;
return distance*distance / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
@ -208,12 +208,12 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k),
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
double Cauchy::weight(double distance) const {
return ksquared_ / (ksquared_ + distance*distance);
}
double Cauchy::residual(double error) const {
const double val = std::log1p(error * error / ksquared_);
double Cauchy::loss(double distance) const {
const double val = std::log1p(distance * distance / ksquared_);
return ksquared_ * val * 0.5;
}
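As a sanity check on the renamed pair above, Cauchy satisfies the standard IRLS identity w(x) = rho'(x)/x:

\[ \rho(x) = \tfrac{k^2}{2}\,\log\!\left(1 + \tfrac{x^2}{k^2}\right) \;\Rightarrow\; \rho'(x) = \frac{k^2 x}{k^2 + x^2} \;\Rightarrow\; w(x) = \frac{\rho'(x)}{x} = \frac{k^2}{k^2 + x^2}. \]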
@ -241,18 +241,18 @@ Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), c
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double one_minus_xc2 = 1.0 - error*error/csquared_;
double Tukey::weight(double distance) const {
if (std::abs(distance) <= c_) {
const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
return one_minus_xc2 * one_minus_xc2;
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
double Tukey::loss(double distance) const {
double absError = std::abs(distance);
if (absError <= c_) {
const double one_minus_xc2 = 1.0 - error*error/csquared_;
const double one_minus_xc2 = 1.0 - distance*distance/csquared_;
const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2;
return csquared_ * (1 - t) / 6.0;
} else {
@ -280,13 +280,13 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
double Welsch::weight(double distance) const {
const double xc2 = (distance*distance)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
double Welsch::loss(double distance) const {
const double xc2 = (distance*distance)/csquared_;
return csquared_ * 0.5 * -std::expm1(-xc2);
}
@ -311,16 +311,16 @@ GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double GemanMcClure::weight(double error) const {
double GemanMcClure::weight(double distance) const {
const double c2 = c_*c_;
const double c4 = c2*c2;
const double c2error = c2 + error*error;
const double c2error = c2 + distance*distance;
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
double GemanMcClure::loss(double distance) const {
const double c2 = c_*c_;
const double error2 = error*error;
const double error2 = distance*distance;
return 0.5 * (c2 * error2) / (c2 + error2);
}
@ -345,8 +345,8 @@ DCS::DCS(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double DCS::weight(double error) const {
const double e2 = error*error;
double DCS::weight(double distance) const {
const double e2 = distance*distance;
if (e2 > c_)
{
const double w = 2.0*c_/(c_ + e2);
@ -356,10 +356,10 @@ double DCS::weight(double error) const {
return 1.0;
}
double DCS::residual(double error) const {
double DCS::loss(double distance) const {
// This is the simplified version of Eq 9 from (Agarwal13icra)
// after you simplify and cancel terms.
const double e2 = error*error;
const double e2 = distance*distance;
const double e4 = e2*e2;
const double c2 = c_*c_;
@ -391,17 +391,17 @@ L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
}
}
double L2WithDeadZone::weight(double error) const {
double L2WithDeadZone::weight(double distance) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
if (std::abs(distance) <= k_) return 0.0;
else if (distance > k_) return (-k_+distance)/distance;
else return (k_+distance)/distance;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
double L2WithDeadZone::loss(double distance) const {
const double abs_error = std::abs(distance);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
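The same identity explains the three-case weight above: since (k - |x|)^2 = (|x| - k)^2, the loss and its derivative give

\[ \rho(x) = \begin{cases} 0, & |x| \le k \\ \tfrac{1}{2}\,(|x| - k)^2, & |x| > k \end{cases} \qquad w(x) = \frac{\rho'(x)}{x} = \begin{cases} 0, & |x| \le k \\ (x - k)/x, & x > k \\ (x + k)/x, & x < -k. \end{cases} \]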


@ -36,12 +36,12 @@ namespace noiseModel {
* The mEstimator name space contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
* which talks about minimizing \sum \rho(r_i), where \rho is a loss function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name Symbol Least-Squares L1-norm Huber
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Loss \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Derivative \phi(x) x sgn(x) x if |x|<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if |x|<k, k/|x| otherwise
*
@ -75,27 +75,31 @@ class GTSAM_EXPORT Base {
* the quadratic function for an L2 penalty, the absolute value function for
* an L1 penalty, etc.
*
* TODO(mikebosse): When the residual function has as input the norm of the
* residual vector, then it prevents implementations of asymmetric loss
* TODO(mikebosse): When the loss function has as input the norm of the
* error vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
*/
virtual double residual(double error) const { return 0; };
virtual double loss(double distance) const { return 0; };
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
virtual double residual(double distance) const { return loss(distance); };
#endif
/*
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the residual function. See
* of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
*/
virtual double weight(double error) const = 0;
virtual double weight(double distance) const = 0;
virtual void print(const std::string &s) const = 0;
virtual bool equals(const Base &expected, double tol = 1e-8) const = 0;
double sqrtWeight(double error) const { return std::sqrt(weight(error)); }
double sqrtWeight(double distance) const { return std::sqrt(weight(distance)); }
/** produce a weight vector according to an error vector and the implemented
* robust function */
@ -123,7 +127,7 @@ class GTSAM_EXPORT Base {
}
};
/// Null class is not robust so is a Gaussian ?
/// Null class should behave as Gaussian
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
@ -131,7 +135,7 @@ class GTSAM_EXPORT Null : public Base {
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
double loss(double distance) const { return 0.5 * distance * distance; }
void print(const std::string &s) const;
bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create();
@ -154,8 +158,8 @@ class GTSAM_EXPORT Fair : public Base {
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
@ -179,8 +183,8 @@ class GTSAM_EXPORT Huber : public Base {
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -209,8 +213,8 @@ class GTSAM_EXPORT Cauchy : public Base {
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -234,8 +238,8 @@ class GTSAM_EXPORT Tukey : public Base {
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -259,8 +263,8 @@ class GTSAM_EXPORT Welsch : public Base {
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -295,8 +299,8 @@ class GTSAM_EXPORT GemanMcClure : public Base {
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -325,8 +329,8 @@ class GTSAM_EXPORT DCS : public Base {
DCS(double c = 1.0, const ReweightScheme reweight = Block);
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -358,8 +362,8 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double weight(double distance) const override;
double loss(double distance) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);


@ -74,6 +74,13 @@ Vector Base::sigmas() const {
throw("Base::sigmas: sigmas() not implemented for this noise model");
}
/* ************************************************************************* */
double Base::squaredMahalanobisDistance(const Vector& v) const {
// Note: for Diagonal, which does ediv_, will be correct for constraints
Vector w = whiten(v);
return w.dot(w);
}
/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) {
size_t m = R.rows(), n = R.cols();
@ -164,13 +171,6 @@ Vector Gaussian::unwhiten(const Vector& v) const {
return backSubstituteUpper(thisR(), v);
}
/* ************************************************************************* */
double Gaussian::squaredMahalanobisDistance(const Vector& v) const {
// Note: for Diagonal, which does ediv_, will be correct for constraints
Vector w = whiten(v);
return w.dot(w);
}
/* ************************************************************************* */
Matrix Gaussian::Whiten(const Matrix& H) const {
return thisR() * H;
@ -376,8 +376,19 @@ Vector Constrained::whiten(const Vector& v) const {
return c;
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/* ************************************************************************* */
double Constrained::distance(const Vector& v) const {
double Constrained::error(const Vector& v) const {
Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
for (size_t i=0; i<dim_; ++i) // add mu weights on constrained variables
if (constrained(i)) // whiten makes constrained variables zero
w[i] = v[i] * sqrt(mu_[i]); // TODO: may want to store sqrt rather than rebuild
return 0.5 * w.dot(w);
}
#endif
/* ************************************************************************* */
double Constrained::squaredMahalanobisDistance(const Vector& v) const {
Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
for (size_t i=0; i<dim_; ++i) // add mu weights on constrained variables
if (constrained(i)) // whiten makes constrained variables zero


@ -90,7 +90,27 @@ namespace gtsam {
/// Unwhiten an error vector.
virtual Vector unwhiten(const Vector& v) const = 0;
virtual double distance(const Vector& v) const = 0;
/// Squared Mahalanobis distance v'*R'*R*v = <R*v,R*v>
virtual double squaredMahalanobisDistance(const Vector& v) const;
/// Mahalanobis distance
virtual double mahalanobisDistance(const Vector& v) const {
return std::sqrt(squaredMahalanobisDistance(v));
}
/// loss function, input is the squared Mahalanobis distance
virtual double loss(const double squared_distance) const {
return 0.5 * squared_distance;
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// calculate the error value given measurement error vector
virtual double error(const Vector& v) const = 0;
virtual double distance(const Vector& v) {
return error(v) * 2;
}
#endif
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const = 0;
virtual void WhitenSystem(Matrix& A, Vector& b) const = 0;
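Taken together, these virtuals let callers compute a factor's total penalty uniformly as loss(squaredMahalanobisDistance(v)) (see NonlinearFactor below): the base-class default reproduces the Gaussian half-quadratic, while Robust overrides loss to apply the m-estimator:

\[ \text{Gaussian (default):}\;\; \mathrm{loss}(d^2) = \tfrac{1}{2}\, d^2, \qquad \text{Robust:}\;\; \mathrm{loss}(d^2) = \rho\big(\sqrt{d^2}\big) = \rho(d). \]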
@ -200,39 +220,30 @@ namespace gtsam {
*/
static shared_ptr Covariance(const Matrix& covariance, bool smart = true);
virtual void print(const std::string& name) const;
virtual bool equals(const Base& expected, double tol=1e-9) const;
virtual Vector sigmas() const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
/**
* Squared Mahalanobis distance v'*R'*R*v = <R*v,R*v>
*/
virtual double squaredMahalanobisDistance(const Vector& v) const;
/**
* Mahalanobis distance
*/
virtual double mahalanobisDistance(const Vector& v) const {
return std::sqrt(squaredMahalanobisDistance(v));
}
void print(const std::string& name) const override;
bool equals(const Base& expected, double tol=1e-9) const override;
Vector sigmas() const override;
Vector whiten(const Vector& v) const override;
Vector unwhiten(const Vector& v) const override;
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
virtual double Mahalanobis(const Vector& v) const {
return squaredMahalanobisDistance(v);
}
#endif
inline virtual double distance(const Vector& v) const {
return squaredMahalanobisDistance(v);
/**
* error value 0.5 * v'*R'*R*v
*/
inline double error(const Vector& v) const override {
return 0.5 * squaredMahalanobisDistance(v);
}
#endif
/**
* Multiply a derivative with R (derivative of whiten)
* Equivalent to whitening each column of the input matrix.
*/
virtual Matrix Whiten(const Matrix& H) const;
Matrix Whiten(const Matrix& H) const override;
/**
* In-place version
@ -247,10 +258,10 @@ namespace gtsam {
/**
* Whiten a system, in place as well
*/
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;
virtual void WhitenSystem(Matrix& A, Vector& b) const;
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const;
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const;
void WhitenSystem(std::vector<Matrix>& A, Vector& b) const override;
void WhitenSystem(Matrix& A, Vector& b) const override;
void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override;
void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override;
/**
* Apply appropriately weighted QR factorization to the system [A b]
@ -335,13 +346,13 @@ namespace gtsam {
return Variances(precisions.array().inverse(), smart);
}
virtual void print(const std::string& name) const;
virtual Vector sigmas() const { return sigmas_; }
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const;
void print(const std::string& name) const override;
Vector sigmas() const override { return sigmas_; }
Vector whiten(const Vector& v) const override;
Vector unwhiten(const Vector& v) const override;
Matrix Whiten(const Matrix& H) const override;
void WhitenInPlace(Matrix& H) const override;
void WhitenInPlace(Eigen::Block<Matrix> H) const override;
/**
* Return standard deviations (sqrt of diagonal)
@ -363,7 +374,7 @@ namespace gtsam {
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
virtual Matrix R() const {
Matrix R() const override {
return invsigmas().asDiagonal();
}
@ -417,10 +428,10 @@ namespace gtsam {
typedef boost::shared_ptr<Constrained> shared_ptr;
virtual ~Constrained() {}
~Constrained() {}
/// true if a constrained noise model, saves slow/clumsy dynamic casting
virtual bool isConstrained() const { return true; }
bool isConstrained() const override { return true; }
/// Return true if a particular dimension is free or constrained
bool constrained(size_t i) const;
@ -472,12 +483,16 @@ namespace gtsam {
return MixedVariances(precisions.array().inverse());
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/**
* The distance function for a constrained noisemodel,
* The error function for a constrained noisemodel,
* for non-constrained versions, uses sigmas, otherwise
* uses the penalty function with mu
*/
virtual double distance(const Vector& v) const;
double error(const Vector& v) const override;
#endif
double squaredMahalanobisDistance(const Vector& v) const override;
/** Fully constrained variations */
static shared_ptr All(size_t dim) {
@ -494,16 +509,16 @@ namespace gtsam {
return shared_ptr(new Constrained(Vector::Constant(dim, mu), Vector::Constant(dim,0)));
}
virtual void print(const std::string& name) const;
void print(const std::string& name) const override;
/// Calculates error vector with weights applied
virtual Vector whiten(const Vector& v) const;
Vector whiten(const Vector& v) const override;
/// Whitening functions will perform partial whitening on rows
/// with a non-zero sigma. Other rows remain untouched.
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const;
Matrix Whiten(const Matrix& H) const override;
void WhitenInPlace(Matrix& H) const override;
void WhitenInPlace(Eigen::Block<Matrix> H) const override;
/**
* Apply QR factorization to the system [A b], taking into account constraints
@ -514,7 +529,7 @@ namespace gtsam {
* @param Ab is the m*(n+1) augmented system matrix [A b]
* @return diagonal noise model can be all zeros, mixed, or not-constrained
*/
virtual Diagonal::shared_ptr QR(Matrix& Ab) const;
Diagonal::shared_ptr QR(Matrix& Ab) const override;
/**
* Returns a Unit version of a constrained noisemodel in which
@ -576,14 +591,14 @@ namespace gtsam {
return Variance(dim, 1.0/precision, smart);
}
virtual void print(const std::string& name) const;
virtual double squaredMahalanobisDistance(const Vector& v) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
virtual void whitenInPlace(Vector& v) const;
virtual void WhitenInPlace(Eigen::Block<Matrix> H) const;
void print(const std::string& name) const override;
double squaredMahalanobisDistance(const Vector& v) const override;
Vector whiten(const Vector& v) const override;
Vector unwhiten(const Vector& v) const override;
Matrix Whiten(const Matrix& H) const override;
void WhitenInPlace(Matrix& H) const override;
void whitenInPlace(Vector& v) const override;
void WhitenInPlace(Eigen::Block<Matrix> H) const override;
/**
* Return standard deviation
@ -616,7 +631,7 @@ namespace gtsam {
typedef boost::shared_ptr<Unit> shared_ptr;
virtual ~Unit() {}
~Unit() {}
/**
* Create a unit covariance noise model
@ -626,19 +641,19 @@ namespace gtsam {
}
/// true if a unit noise model, saves slow/clumsy dynamic casting
virtual bool isUnit() const { return true; }
bool isUnit() const override { return true; }
virtual void print(const std::string& name) const;
virtual double squaredMahalanobisDistance(const Vector& v) const {return v.dot(v); }
virtual Vector whiten(const Vector& v) const { return v; }
virtual Vector unwhiten(const Vector& v) const { return v; }
virtual Matrix Whiten(const Matrix& H) const { return H; }
virtual void WhitenInPlace(Matrix& /*H*/) const {}
virtual void WhitenInPlace(Eigen::Block<Matrix> /*H*/) const {}
virtual void whitenInPlace(Vector& /*v*/) const {}
virtual void unwhitenInPlace(Vector& /*v*/) const {}
virtual void whitenInPlace(Eigen::Block<Vector>& /*v*/) const {}
virtual void unwhitenInPlace(Eigen::Block<Vector>& /*v*/) const {}
void print(const std::string& name) const override;
double squaredMahalanobisDistance(const Vector& v) const override {return v.dot(v); }
Vector whiten(const Vector& v) const override { return v; }
Vector unwhiten(const Vector& v) const override { return v; }
Matrix Whiten(const Matrix& H) const override { return H; }
void WhitenInPlace(Matrix& /*H*/) const override {}
void WhitenInPlace(Eigen::Block<Matrix> /*H*/) const override {}
void whitenInPlace(Vector& /*v*/) const override {}
void unwhitenInPlace(Vector& /*v*/) const override {}
void whitenInPlace(Eigen::Block<Vector>& /*v*/) const override {}
void unwhitenInPlace(Eigen::Block<Vector>& /*v*/) const override {}
private:
/** Serialization function */
@ -687,10 +702,10 @@ namespace gtsam {
: Base(noise->dim()), robust_(robust), noise_(noise) {}
/// Destructor
virtual ~Robust() {}
~Robust() {}
virtual void print(const std::string& name) const;
virtual bool equals(const Base& expected, double tol=1e-9) const;
void print(const std::string& name) const override;
bool equals(const Base& expected, double tol=1e-9) const override;
/// Return the contained robust error function
const RobustModel::shared_ptr& robust() const { return robust_; }
@ -699,28 +714,36 @@ namespace gtsam {
const NoiseModel::shared_ptr& noise() const { return noise_; }
// TODO: functions below are dummy but necessary for the noiseModel::Base
inline virtual Vector whiten(const Vector& v) const
inline Vector whiten(const Vector& v) const override
{ Vector r = v; this->WhitenSystem(r); return r; }
inline virtual Matrix Whiten(const Matrix& A) const
inline Matrix Whiten(const Matrix& A) const override
{ Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; }
inline virtual Vector unwhiten(const Vector& /*v*/) const
inline Vector unwhiten(const Vector& /*v*/) const override
{ throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }
// Fold the use of the m-estimator residual(...) function into distance(...)
inline virtual double distance(const Vector& v) const
{ return robust_->residual(this->unweightedWhiten(v).norm()); }
inline virtual double distance_non_whitened(const Vector& v) const
{ return robust_->residual(v.norm()); }
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
inline double distance(const Vector& v) override {
return robust_->loss(this->unweightedWhiten(v).norm());
}
// Fold the use of the m-estimator loss(...) function into error(...)
inline double error(const Vector& v) const override
{ return robust_->loss(noise_->mahalanobisDistance(v)); }
#endif
double loss(const double squared_distance) const override {
return robust_->loss(std::sqrt(squared_distance));
}
// TODO: these are really robust iterated re-weighting support functions
virtual void WhitenSystem(Vector& b) const;
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;
virtual void WhitenSystem(Matrix& A, Vector& b) const;
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const;
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const;
void WhitenSystem(std::vector<Matrix>& A, Vector& b) const override;
void WhitenSystem(Matrix& A, Vector& b) const override;
void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override;
void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override;
virtual Vector unweightedWhiten(const Vector& v) const {
Vector unweightedWhiten(const Vector& v) const override {
return noise_->unweightedWhiten(v);
}
virtual double weight(const Vector& v) const {
double weight(const Vector& v) const override {
// Todo(mikebosse): make the robust weight function input a vector.
return robust_->weight(v.norm());
}
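A short usage sketch of the composed model under the new names; the sigma and Huber threshold are illustrative:

#include <gtsam/linear/NoiseModel.h>

using namespace gtsam;

double robustPenaltyDemo() {
  // Wrap a 2-D isotropic Gaussian with a Huber m-estimator.
  auto robust = noiseModel::Robust::Create(
      noiseModel::mEstimator::Huber::Create(1.345),
      noiseModel::Isotropic::Sigma(2, 0.1));

  Vector2 e(0.3, -0.4);
  double w = robust->weight(e);  // m-estimator weight at ||e||, used by IRLS
  (void)w;

  // Total penalty via the new hooks: rho applied to the Mahalanobis distance.
  return robust->loss(robust->squaredMahalanobisDistance(e));
}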


@ -182,8 +182,9 @@ TEST(NoiseModel, ConstrainedMixed )
EXPECT(assert_equal(Vector3(0.5, 1.0, 0.5),d->whiten(infeasible)));
EXPECT(assert_equal(Vector3(0.5, 0.0, 0.5),d->whiten(feasible)));
DOUBLES_EQUAL(1000.0 + 0.25 + 0.25,d->distance(infeasible),1e-9);
DOUBLES_EQUAL(0.5,d->distance(feasible),1e-9);
DOUBLES_EQUAL(0.5 * (1000.0 + 0.25 + 0.25),d->loss(d->squaredMahalanobisDistance(infeasible)),1e-9);
DOUBLES_EQUAL(0.5, d->squaredMahalanobisDistance(feasible),1e-9);
DOUBLES_EQUAL(0.5 * 0.5, d->loss(0.5),1e-9);
}
/* ************************************************************************* */
@ -197,8 +198,9 @@ TEST(NoiseModel, ConstrainedAll )
EXPECT(assert_equal(Vector3(1.0, 1.0, 1.0),i->whiten(infeasible)));
EXPECT(assert_equal(Vector3(0.0, 0.0, 0.0),i->whiten(feasible)));
DOUBLES_EQUAL(1000.0 * 3.0,i->distance(infeasible),1e-9);
DOUBLES_EQUAL(0.0,i->distance(feasible),1e-9);
DOUBLES_EQUAL(0.5 * 1000.0 * 3.0,i->loss(i->squaredMahalanobisDistance(infeasible)),1e-9);
DOUBLES_EQUAL(0.0, i->squaredMahalanobisDistance(feasible), 1e-9);
DOUBLES_EQUAL(0.0, i->loss(0.0),1e-9);
}
/* ************************************************************************* */
@ -451,7 +453,7 @@ TEST(NoiseModel, WhitenInPlace)
/*
* These tests are responsible for testing the weight functions for the m-estimators in GTSAM.
* The weight function is related to the analytic derivative of the residual function. See
* The weight function is related to the analytic derivative of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This weight function is required when optimizing cost functions with robust
* penalties using iteratively re-weighted least squares.
@ -467,10 +469,10 @@ TEST(NoiseModel, robustFunctionFair)
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->loss(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->loss(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->loss(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->loss(error4), 1e-8);
}
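Those expected values are consistent with a Fair threshold of c = 5, the constant these tests appear to use, and the loss implemented earlier in this diff:

\[ \rho(\pm 1) = 5^2\left(\tfrac{1}{5} - \ln\tfrac{6}{5}\right) \approx 0.441961, \qquad \rho(\pm 10) = 5^2\left(2 - \ln 3\right) \approx 22.534693. \]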
TEST(NoiseModel, robustFunctionHuber)
@ -483,10 +485,10 @@ TEST(NoiseModel, robustFunctionHuber)
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->loss(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->loss(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->loss(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionCauchy)
@ -499,10 +501,10 @@ TEST(NoiseModel, robustFunctionCauchy)
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionGemanMcClure)
@ -514,10 +516,10 @@ TEST(NoiseModel, robustFunctionGemanMcClure)
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->loss(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionWelsch)
@ -530,10 +532,10 @@ TEST(NoiseModel, robustFunctionWelsch)
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionTukey)
@ -546,10 +548,10 @@ TEST(NoiseModel, robustFunctionTukey)
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionDCS)
@ -560,8 +562,8 @@ TEST(NoiseModel, robustFunctionDCS)
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->loss(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->loss(error2), 1e-8);
}
TEST(NoiseModel, robustFunctionL2WithDeadZone)
@ -576,12 +578,12 @@ TEST(NoiseModel, robustFunctionL2WithDeadZone)
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8);
DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e5), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->loss(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->loss(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->loss(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->loss(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->loss(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->loss(e5), 1e-8);
}
/* ************************************************************************* */
@ -665,11 +667,11 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
/*
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
* implement a residual function, and GTSAM calls the weight function to evaluate the
* total penalty, rather than calling the residual function. The weight function should be
* implement a loss function, and GTSAM calls the weight function to evaluate the
* total penalty, rather than calling the loss function. The weight function should be
* used during iteratively reweighted least squares optimization, but should not be used to
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
* both a weight and a residual function, and for GTSAM to call the residual function when
* both a weight and a loss function, and for GTSAM to call the loss function when
* evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it
* commented out until the underlying bug in GTSAM is fixed.
*
@ -681,13 +683,44 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
}
TEST(NoiseModel, lossFunctionAtZero)
{
const double k = 5.0;
auto fair = mEstimator::Fair::Create(k);
DOUBLES_EQUAL(fair->loss(0), 0, 1e-8);
DOUBLES_EQUAL(fair->weight(0), 1, 1e-8);
auto huber = mEstimator::Huber::Create(k);
DOUBLES_EQUAL(huber->loss(0), 0, 1e-8);
DOUBLES_EQUAL(huber->weight(0), 1, 1e-8);
auto cauchy = mEstimator::Cauchy::Create(k);
DOUBLES_EQUAL(cauchy->loss(0), 0, 1e-8);
DOUBLES_EQUAL(cauchy->weight(0), 1, 1e-8);
auto gmc = mEstimator::GemanMcClure::Create(k);
DOUBLES_EQUAL(gmc->loss(0), 0, 1e-8);
DOUBLES_EQUAL(gmc->weight(0), 1, 1e-8);
auto welsch = mEstimator::Welsch::Create(k);
DOUBLES_EQUAL(welsch->loss(0), 0, 1e-8);
DOUBLES_EQUAL(welsch->weight(0), 1, 1e-8);
auto tukey = mEstimator::Tukey::Create(k);
DOUBLES_EQUAL(tukey->loss(0), 0, 1e-8);
DOUBLES_EQUAL(tukey->weight(0), 1, 1e-8);
auto dcs = mEstimator::DCS::Create(k);
DOUBLES_EQUAL(dcs->loss(0), 0, 1e-8);
DOUBLES_EQUAL(dcs->weight(0), 1, 1e-8);
// auto lsdz = mEstimator::L2WithDeadZone::Create(k);
// DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
// DOUBLES_EQUAL(lsdz->weight(0), 1, 1e-8);
}
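These zero-error expectations encode the usual normalization: each of these losses behaves like the half-quadratic near the origin, so

\[ \rho(x) \approx \tfrac{1}{2}\,x^2 \;\text{ as } x \to 0, \qquad w(0) = \lim_{x \to 0} \frac{\rho'(x)}{x} = \rho''(0) = 1. \]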
/* ************************************************************************* */
#define TEST_GAUSSIAN(gaussian)\
EQUALITY(info, gaussian->information());\
EQUALITY(cov, gaussian->covariance());\
EXPECT(assert_equal(white, gaussian->whiten(e)));\
EXPECT(assert_equal(e, gaussian->unwhiten(white)));\
EXPECT_DOUBLES_EQUAL(251, gaussian->distance(e), 1e-9);\
EXPECT_DOUBLES_EQUAL(251.0, gaussian->squaredMahalanobisDistance(e), 1e-9);\
EXPECT_DOUBLES_EQUAL(0.5 * 251.0, gaussian->loss(251.0), 1e-9);\
Matrix A = R.inverse(); Vector b = e;\
gaussian->WhitenSystem(A, b);\
EXPECT(assert_equal(I, A));\


@ -121,7 +121,7 @@ double NoiseModelFactor::error(const Values& c) const {
const Vector b = unwhitenedError(c);
check(noiseModel_, b.size());
if (noiseModel_)
return 0.5 * noiseModel_->distance(b);
return noiseModel_->loss(noiseModel_->squaredMahalanobisDistance(b));
else
return 0.5 * b.squaredNorm();
} else {

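For a plain Gaussian model the new expression is identical to the old one, since the default loss is the half-quadratic; robust models now route through the m-estimator instead:

\[ \underbrace{\tfrac{1}{2}\,\mathrm{distance}(b)}_{\text{old}} = \tfrac{1}{2}\,d^2 = \mathrm{loss}(d^2) = \underbrace{\mathrm{loss}\big(\mathrm{squaredMahalanobisDistance}(b)\big)}_{\text{new}}, \qquad d^2 = b^\top \Sigma^{-1} b. \]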

@ -459,11 +459,12 @@ TEST(NonlinearOptimizer, RobustMeanCalculation) {
init.insert(0, 100.0);
expected.insert(0, 3.33333333);
LevenbergMarquardtParams params;
DoglegParams params_dl;
params_dl.setRelativeErrorTol(1e-10);
auto gn_result = GaussNewtonOptimizer(fg, init).optimize();
auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize();
auto dl_result = DoglegOptimizer(fg, init).optimize();
auto lm_result = LevenbergMarquardtOptimizer(fg, init).optimize();
auto dl_result = DoglegOptimizer(fg, init, params_dl).optimize();
EXPECT(assert_equal(expected, gn_result, tol));
EXPECT(assert_equal(expected, lm_result, tol));
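For orientation, a hedged sketch of how a robust-mean problem like this is typically assembled in GTSAM; the measurements below are hypothetical stand-ins, not the data this test actually uses:

#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

int main() {
  // Huber-robustified unary factors pull key 0 toward each measurement;
  // the outlier is heavily down-weighted, so it moves the estimate far
  // less than it would under plain least squares.
  auto robust = noiseModel::Robust::Create(
      noiseModel::mEstimator::Huber::Create(1.345),
      noiseModel::Isotropic::Sigma(1, 1.0));

  NonlinearFactorGraph fg;
  for (double z : {1.0, 2.0, 3.0, 1000.0})  // hypothetical data, one outlier
    fg.emplace_shared<PriorFactor<double>>(0, z, robust);

  Values init;
  init.insert(0, 100.0);  // deliberately poor initial guess, as in the test

  Values result = GaussNewtonOptimizer(fg, init).optimize();
  result.print("robust mean estimate:\n");
  return 0;
}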