Make variable names consistent with math

Rename mEstimator residual to loss so that it is not confused
with lsq residual vector Ax-b.
Rename distance to squaredDistance when appropriate.
Remove erroneous 0.5 factor from NoiseModelFactor::weight()
Fix bug in mEstimator::Null::loss()
release/4.3a0
Michael Bosse 2019-12-03 09:01:43 -08:00
parent 60d820e042
commit 23f5c8c331
8 changed files with 87 additions and 89 deletions

18
gtsam.h
View File

@ -1364,7 +1364,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
@ -1375,7 +1375,7 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
@ -1386,7 +1386,7 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
@ -1397,7 +1397,7 @@ virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
@ -1408,7 +1408,7 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
@ -1419,7 +1419,7 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
@ -1430,7 +1430,7 @@ virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
@ -1441,7 +1441,7 @@ virtual class DCS: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
@ -1452,7 +1452,7 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
void serializable() const;
double weight(double error) const;
double residual(double error) const;
double loss(double error) const;
};
}///\namespace mEstimator

View File

@ -141,7 +141,7 @@ double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
double Fair::loss(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
@ -175,7 +175,7 @@ double Huber::weight(double error) const {
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
double Huber::loss(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
@ -212,7 +212,7 @@ double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
double Cauchy::loss(double error) const {
const double val = std::log1p(error * error / ksquared_);
return ksquared_ * val * 0.5;
}
@ -249,7 +249,7 @@ double Tukey::weight(double error) const {
return 0.0;
}
double Tukey::residual(double error) const {
double Tukey::loss(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double one_minus_xc2 = 1.0 - error*error/csquared_;
@ -285,7 +285,7 @@ double Welsch::weight(double error) const {
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
double Welsch::loss(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * -std::expm1(-xc2);
}
@ -318,7 +318,7 @@ double GemanMcClure::weight(double error) const {
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
double GemanMcClure::loss(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
@ -356,7 +356,7 @@ double DCS::weight(double error) const {
return 1.0;
}
double DCS::residual(double error) const {
double DCS::loss(double error) const {
// This is the simplified version of Eq 9 from (Agarwal13icra)
// after you simplify and cancel terms.
const double e2 = error*error;
@ -400,7 +400,7 @@ double L2WithDeadZone::weight(double error) const {
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
double L2WithDeadZone::loss(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}

View File

@ -36,12 +36,12 @@ namespace noiseModel {
* The mEstimator namespace contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
* which talks about minimizing \sum \rho(r_i), where \rho is a loss function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name Symbol Least-Squares L1-norm Huber
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Loss \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Derivative \phi(x) x sgn(x) x if |x|<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if |x|<k, k/|x| otherwise
*
@ -75,17 +75,17 @@ class GTSAM_EXPORT Base {
* the quadratic function for an L2 penalty, the absolute value function for
* an L1 penalty, etc.
*
* TODO(mikebosse): When the residual function has as input the norm of the
* TODO(mikebosse): When the loss function has as input the norm of the
* residual vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
*/
virtual double residual(double error) const { return 0; };
virtual double loss(double error) const { return 0; };
/*
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the residual function. See
* of the loss function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
@ -123,7 +123,7 @@ class GTSAM_EXPORT Base {
}
};
/// Null class is not robust so is a Gaussian ?
/// Null class is the plain L2 (quadratic) loss and hence not robust; equivalent to a Gaussian noise model with no robust reweighting applied.
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
@ -131,7 +131,7 @@ class GTSAM_EXPORT Null : public Base {
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
double loss(double error) const { return 0.5 * error * error; }
void print(const std::string &s) const;
bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create();
@ -155,7 +155,7 @@ class GTSAM_EXPORT Fair : public Base {
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
@ -180,7 +180,7 @@ class GTSAM_EXPORT Huber : public Base {
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -210,7 +210,7 @@ class GTSAM_EXPORT Cauchy : public Base {
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -235,7 +235,7 @@ class GTSAM_EXPORT Tukey : public Base {
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -260,7 +260,7 @@ class GTSAM_EXPORT Welsch : public Base {
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -296,7 +296,7 @@ class GTSAM_EXPORT GemanMcClure : public Base {
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -326,7 +326,7 @@ class GTSAM_EXPORT DCS : public Base {
DCS(double c = 1.0, const ReweightScheme reweight = Block);
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
@ -359,7 +359,7 @@ class GTSAM_EXPORT L2WithDeadZone : public Base {
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
double loss(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);

View File

@ -371,7 +371,7 @@ Vector Constrained::whiten(const Vector& v) const {
}
/* ************************************************************************* */
double Constrained::distance(const Vector& v) const {
double Constrained::squaredDistance(const Vector& v) const {
Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
for (size_t i=0; i<dim_; ++i) // add mu weights on constrained variables
if (constrained(i)) // whiten makes constrained variables zero

View File

@ -90,7 +90,7 @@ namespace gtsam {
/// Unwhiten an error vector.
virtual Vector unwhiten(const Vector& v) const = 0;
virtual double distance(const Vector& v) const = 0;
virtual double squaredDistance(const Vector& v) const = 0;
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const = 0;
virtual void WhitenSystem(Matrix& A, Vector& b) const = 0;
@ -211,7 +211,7 @@ namespace gtsam {
*/
virtual double Mahalanobis(const Vector& v) const;
inline virtual double distance(const Vector& v) const {
inline virtual double squaredDistance(const Vector& v) const {
return Mahalanobis(v);
}
@ -460,11 +460,11 @@ namespace gtsam {
}
/**
* The distance function for a constrained noisemodel,
* The squaredDistance function for a constrained noisemodel,
* for non-constrained versions, uses sigmas, otherwise
* uses the penalty function with mu
*/
virtual double distance(const Vector& v) const;
virtual double squaredDistance(const Vector& v) const;
/** Fully constrained variations */
static shared_ptr All(size_t dim) {
@ -692,11 +692,9 @@ namespace gtsam {
{ Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; }
inline virtual Vector unwhiten(const Vector& /*v*/) const
{ throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }
// Fold the use of the m-estimator residual(...) function into distance(...)
inline virtual double distance(const Vector& v) const
{ return robust_->residual(this->unweightedWhiten(v).norm()); }
inline virtual double distance_non_whitened(const Vector& v) const
{ return robust_->residual(v.norm()); }
// Fold the use of the m-estimator loss(...) function into squaredDistance(...)
inline virtual double squaredDistance(const Vector& v) const
{ return 2.0 * robust_->loss(this->unweightedWhiten(v).norm()); }
// TODO: these are really robust iterated re-weighting support functions
virtual void WhitenSystem(Vector& b) const;
virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;

View File

@ -68,10 +68,10 @@ TEST(NoiseModel, constructors)
for(Gaussian::shared_ptr mi: m)
EXPECT(assert_equal(unwhitened,mi->unwhiten(whitened)));
// test Mahalanobis distance
double distance = 5*5+10*10+15*15;
// test Mahalanobis squaredDistance
double squaredDistance = 5*5+10*10+15*15;
for(Gaussian::shared_ptr mi: m)
DOUBLES_EQUAL(distance,mi->Mahalanobis(unwhitened),1e-9);
DOUBLES_EQUAL(squaredDistance,mi->Mahalanobis(unwhitened),1e-9);
// test R matrix
for(Gaussian::shared_ptr mi: m)
@ -182,8 +182,8 @@ TEST(NoiseModel, ConstrainedMixed )
EXPECT(assert_equal(Vector3(0.5, 1.0, 0.5),d->whiten(infeasible)));
EXPECT(assert_equal(Vector3(0.5, 0.0, 0.5),d->whiten(feasible)));
DOUBLES_EQUAL(1000.0 + 0.25 + 0.25,d->distance(infeasible),1e-9);
DOUBLES_EQUAL(0.5,d->distance(feasible),1e-9);
DOUBLES_EQUAL(1000.0 + 0.25 + 0.25,d->squaredDistance(infeasible),1e-9);
DOUBLES_EQUAL(0.5,d->squaredDistance(feasible),1e-9);
}
/* ************************************************************************* */
@ -197,8 +197,8 @@ TEST(NoiseModel, ConstrainedAll )
EXPECT(assert_equal(Vector3(1.0, 1.0, 1.0),i->whiten(infeasible)));
EXPECT(assert_equal(Vector3(0.0, 0.0, 0.0),i->whiten(feasible)));
DOUBLES_EQUAL(1000.0 * 3.0,i->distance(infeasible),1e-9);
DOUBLES_EQUAL(0.0,i->distance(feasible),1e-9);
DOUBLES_EQUAL(1000.0 * 3.0,i->squaredDistance(infeasible),1e-9);
DOUBLES_EQUAL(0.0,i->squaredDistance(feasible),1e-9);
}
/* ************************************************************************* */
@ -467,10 +467,10 @@ TEST(NoiseModel, robustFunctionFair)
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->loss(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->loss(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->loss(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionHuber)
@ -483,10 +483,10 @@ TEST(NoiseModel, robustFunctionHuber)
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->loss(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->loss(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->loss(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionCauchy)
@ -499,10 +499,10 @@ TEST(NoiseModel, robustFunctionCauchy)
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionGemanMcClure)
@ -514,10 +514,10 @@ TEST(NoiseModel, robustFunctionGemanMcClure)
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->loss(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->loss(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionWelsch)
@ -530,10 +530,10 @@ TEST(NoiseModel, robustFunctionWelsch)
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionTukey)
@ -546,10 +546,10 @@ TEST(NoiseModel, robustFunctionTukey)
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->loss(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionDCS)
@ -560,8 +560,8 @@ TEST(NoiseModel, robustFunctionDCS)
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->loss(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->loss(error2), 1e-8);
}
TEST(NoiseModel, robustFunctionL2WithDeadZone)
@ -576,12 +576,12 @@ TEST(NoiseModel, robustFunctionL2WithDeadZone)
DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8);
DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->residual(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->residual(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->residual(e5), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->loss(e0), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->loss(e1), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->loss(e2), 1e-8);
DOUBLES_EQUAL(0.0, lsdz->loss(e3), 1e-8);
DOUBLES_EQUAL(0.00005, lsdz->loss(e4), 1e-8);
DOUBLES_EQUAL(40.5, lsdz->loss(e5), 1e-8);
}
/* ************************************************************************* */
@ -673,12 +673,12 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
* evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it
* commented out until the underlying bug in GTSAM is fixed.
*
* for (int i = 0; i < 5; i++) {
* Vector3 error = Vector3(i, 0, 0);
* DOUBLES_EQUAL(0.5*max(0,i-1)*max(0,i-1), robust->distance(error), 1e-8);
* }
*/
for (int i = 0; i < 5; i++) {
Vector3 error = Vector3(i, 0, 0);
DOUBLES_EQUAL(0.5*max(0,i-1)*max(0,i-1), robust->squaredDistance(error), 1e-8);
}
}
/* ************************************************************************* */
@ -687,7 +687,7 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
EQUALITY(cov, gaussian->covariance());\
EXPECT(assert_equal(white, gaussian->whiten(e)));\
EXPECT(assert_equal(e, gaussian->unwhiten(white)));\
EXPECT_DOUBLES_EQUAL(251, gaussian->distance(e), 1e-9);\
EXPECT_DOUBLES_EQUAL(251, gaussian->squaredDistance(e), 1e-9);\
Matrix A = R.inverse(); Vector b = e;\
gaussian->WhitenSystem(A, b);\
EXPECT(assert_equal(I, A));\

View File

@ -106,7 +106,7 @@ double NoiseModelFactor::weight(const Values& c) const {
if (noiseModel_) {
const Vector b = unwhitenedError(c);
check(noiseModel_, b.size());
return 0.5 * noiseModel_->weight(b);
return noiseModel_->weight(b);
}
else
return 1.0;
@ -121,7 +121,7 @@ double NoiseModelFactor::error(const Values& c) const {
const Vector b = unwhitenedError(c);
check(noiseModel_, b.size());
if (noiseModel_)
return 0.5 * noiseModel_->distance(b);
return 0.5 * noiseModel_->squaredDistance(b);
else
return 0.5 * b.squaredNorm();
} else {

View File

@ -48,7 +48,7 @@ function plot_m_estimator(x, model, plot_title, fig_id, filename)
rho = zeros(size(x));
for i = 1:size(x, 2)
w(i) = model.weight(x(i));
rho(i) = model.residual(x(i));
rho(i) = model.loss(x(i));
end
psi = w .* x;