Merge pull request #117 from borglab/feature/wrap-mestimator-weight

Wrap mEstimators
release/4.3a0
Varun Agrawal 2019-10-10 16:55:26 -04:00 committed by GitHub
commit a4ac57c5b8
6 changed files with 383 additions and 132 deletions

gtsam.h (58 changed lines)

@ -1362,6 +1362,9 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
@ -1370,6 +1373,9 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
@ -1378,6 +1384,20 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
Cauchy(double k);
static gtsam::noiseModel::mEstimator::Cauchy* Create(double k);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
@ -1386,6 +1406,9 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
@ -1394,8 +1417,43 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
GemanMcClure(double c);
static gtsam::noiseModel::mEstimator::GemanMcClure* Create(double c);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
DCS(double c);
static gtsam::noiseModel::mEstimator::DCS* Create(double c);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
L2WithDeadZone(double k);
static gtsam::noiseModel::mEstimator::L2WithDeadZone* Create(double k);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
}///\namespace mEstimator
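For reference, the two entries now exposed on every kernel correspond to the C++ methods exercised by the unit tests later in this diff: weight(e) is the IRLS weight w(e) = phi(e)/e, and residual(e) is the loss rho(e). A minimal C++ sketch (the include path is an assumption; the commented values follow from the Huber implementation shown further down):

#include <gtsam/linear/NoiseModel.h>
#include <iostream>

int main() {
  using namespace gtsam::noiseModel;
  // Huber kernel with the suggested default threshold k = 1.345.
  mEstimator::Huber::shared_ptr huber = mEstimator::Huber::Create(1.345);
  // For |e| > k: weight(e) = k/|e|, residual(e) = k*(|e| - k/2).
  std::cout << huber->weight(2.0) << std::endl;    // ~0.6725
  std::cout << huber->residual(2.0) << std::endl;  // ~1.7855
  return 0;
}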


@ -718,15 +718,26 @@ void Null::print(const std::string &s="") const
Null::shared_ptr Null::Create()
{ return shared_ptr(new Null()); }
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
if (c_ <= 0) {
throw runtime_error("mEstimator Fair takes only positive double in constructor.");
}
}
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log(1 + normalizedError));
}
void Fair::print(const std::string &s="") const
{ cout << s << "fair (" << c_ << ")" << endl; }
@ -750,6 +761,20 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
}
void Huber::print(const std::string &s="") const {
cout << s << "huber (" << k_ << ")" << endl;
}
@ -774,6 +799,16 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k),
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
const double xc2 = error / k_;
const double val = std::log(1 + (xc2*xc2));
return ksquared_ * val * 0.5;
}
void Cauchy::print(const std::string &s="") const {
cout << s << "cauchy (" << k_ << ")" << endl;
}
@ -791,7 +826,31 @@ Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
/* ************************************************************************* */
// Tukey
/* ************************************************************************* */
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
if (c <= 0) {
throw runtime_error("mEstimator Tukey takes only positive double in constructor.");
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double xc2 = error*error/csquared_;
const double t = (1 - xc2)*(1 - xc2)*(1 - xc2);
return csquared_ * (1 - t) / 6.0;
} else {
return csquared_ / 6.0;
}
}
void Tukey::print(const std::string &s="") const {
std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
@ -810,8 +869,19 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
/* ************************************************************************* */
// Welsch
/* ************************************************************************* */
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * (1 - std::exp(-xc2) );
}
void Welsch::print(const std::string &s="") const {
std::cout << s << ": Welsch (" << c_ << ")" << std::endl;
}
@ -826,24 +896,6 @@ Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsch(c, reweight));
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
Welsh::Welsh(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
void Welsh::print(const std::string &s="") const {
std::cout << s << ": Welsh (" << c_ << ")" << std::endl;
}
bool Welsh::equals(const Base &expected, double tol) const {
const Welsh* p = dynamic_cast<const Welsh*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Welsh::shared_ptr Welsh::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsh(c, reweight));
}
#endif
/* ************************************************************************* */
// GemanMcClure
/* ************************************************************************* */
@ -858,6 +910,12 @@ double GemanMcClure::weight(double error) const {
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
}
void GemanMcClure::print(const std::string &s="") const {
std::cout << s << ": Geman-McClure (" << c_ << ")" << std::endl;
}
@ -890,6 +948,16 @@ double DCS::weight(double error) const {
return 1.0;
}
double DCS::residual(double error) const {
// This is Eq. 9 from (Agarwal13icra), simplified by cancelling terms.
const double e2 = error*error;
const double e4 = e2*e2;
const double c2 = c_*c_;
return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_));
}
void DCS::print(const std::string &s="") const {
std::cout << s << ": DCS (" << c_ << ")" << std::endl;
}
@ -908,12 +976,27 @@ DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
// L2WithDeadZone
/* ************************************************************************* */
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
: Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
}
}
double L2WithDeadZone::weight(double error) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
void L2WithDeadZone::print(const std::string &s="") const {
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
}
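As a quick sanity check of the two functions above (illustrative numbers, with k = 1): an error of 0.5 lies inside the dead zone, so weight(0.5) = 0 and residual(0.5) = 0; an error of 2 lies outside it, giving weight(2) = (-k + 2)/2 = 0.5 and residual(2) = 0.5*(k - 2)^2 = 0.5.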


@ -635,9 +635,9 @@ namespace gtsam {
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name Symbol Least-Squares L1-norm Huber
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if x<k, 0.5*k^2 + k|x-k| otherwise
* Derivative \phi(x) x sgn(x) x if x<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if x<k, k/|x| otherwise
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k*(|x|-k) otherwise
* Derivative \phi(x) x sgn(x) x if |x|<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if |x|<k, k/|x| otherwise
*
* With these definitions, D(\rho(x), p) = \phi(x) D(x,p) = w(x) x D(x,p) = w(x) D(L2(x), p),
* and hence we can solve the equivalent weighted least squares problem \sum w(r_i) \rho(r_i)
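Written out for the Huber column (a restatement of the table above, using the convention \phi(x) = \rho'(x)):

\rho(x) = \begin{cases} \tfrac{1}{2}x^2 & |x| < k \\ \tfrac{1}{2}k^2 + k(|x|-k) & |x| \ge k \end{cases}, \qquad
\phi(x) = \begin{cases} x & |x| < k \\ k\,\operatorname{sgn}(x) & |x| \ge k \end{cases}, \qquad
w(x) = \frac{\phi(x)}{x} = \begin{cases} 1 & |x| < k \\ k/|x| & |x| \ge k \end{cases}

so that w(x)*x = \phi(x), which is exactly the identity used in the weighted least-squares reformulation.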
@ -725,10 +725,11 @@ namespace gtsam {
typedef boost::shared_ptr<Null> shared_ptr;
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
virtual ~Null() {}
virtual double weight(double /*error*/) const { return 1.0; }
virtual void print(const std::string &s) const;
virtual bool equals(const Base& /*expected*/, double /*tol*/) const { return true; }
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base& /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create() ;
private:
@ -749,9 +750,8 @@ namespace gtsam {
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double c, const ReweightScheme reweight = Block) ;
@ -775,10 +775,8 @@ namespace gtsam {
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const {
double absError = std::abs(error);
return (absError < k_) ? (1.0) : (k_ / absError);
}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
@ -806,9 +804,8 @@ namespace gtsam {
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
@ -832,13 +829,8 @@ namespace gtsam {
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const {
if (std::abs(error) <= c_) {
double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
@ -862,10 +854,8 @@ namespace gtsam {
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const {
double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
@ -885,31 +875,7 @@ namespace gtsam {
// Welsh implements the "Welsch" robust error model (Zhang97ivc)
// This was misspelled in previous versions of gtsam and should be
// removed in the future.
class GTSAM_EXPORT Welsh : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Welsh> shared_ptr;
Welsh(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const {
double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
using Welsh = Welsch;
#endif
/// GemanMcClure implements the "Geman-McClure" robust error model
@ -923,10 +889,11 @@ namespace gtsam {
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
virtual ~GemanMcClure() {}
virtual double weight(double error) const;
virtual void print(const std::string &s) const;
virtual bool equals(const Base& expected, double tol=1e-8) const;
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
@ -952,10 +919,11 @@ namespace gtsam {
typedef boost::shared_ptr<DCS> shared_ptr;
DCS(double c = 1.0, const ReweightScheme reweight = Block);
virtual ~DCS() {}
virtual double weight(double error) const;
virtual void print(const std::string &s) const;
virtual bool equals(const Base& expected, double tol=1e-8) const;
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
@ -983,19 +951,9 @@ namespace gtsam {
public:
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k, const ReweightScheme reweight = Block);
double residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
double weight(double error) const {
// note that this code is slightly uglier than above, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) above.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);


@ -457,6 +457,22 @@ TEST(NoiseModel, WhitenInPlace)
* penalties using iteratively re-weighted least squares.
*/
TEST(NoiseModel, robustFunctionFair)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Fair::shared_ptr fair = mEstimator::Fair::Create(k);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error1), 1e-8);
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionHuber)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
@ -466,26 +482,86 @@ TEST(NoiseModel, robustFunctionHuber)
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionCauchy)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Cauchy::shared_ptr cauchy = mEstimator::Cauchy::Create(k);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error1), 1e-8);
DOUBLES_EQUAL(0.2000, cauchy->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionGemanMcClure)
{
const double k = 1.0, error1 = 1.0, error2 = 10.0;
const double k = 1.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::GemanMcClure::shared_ptr gmc = mEstimator::GemanMcClure::Create(k);
const double weight1 = gmc->weight(error1),
weight2 = gmc->weight(error2);
DOUBLES_EQUAL(0.25 , weight1, 1e-8);
DOUBLES_EQUAL(9.80296e-5, weight2, 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error1), 1e-8);
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error2), 1e-8);
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionWelsch)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Welsch::shared_ptr welsch = mEstimator::Welsch::Create(k);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error1), 1e-8);
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionTukey)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Tukey::shared_ptr tukey = mEstimator::Tukey::Create(k);
DOUBLES_EQUAL(0.9216, tukey->weight(error1), 1e-8);
DOUBLES_EQUAL(0.0, tukey->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionDCS)
{
const double k = 1.0, error1 = 1.0, error2 = 10.0;
const mEstimator::DCS::shared_ptr dcs = mEstimator::DCS::Create(k);
const double weight1 = dcs->weight(error1),
weight2 = dcs->weight(error2);
DOUBLES_EQUAL(1.0 , weight1, 1e-8);
DOUBLES_EQUAL(0.00039211, weight2, 1e-8);
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8);
}
TEST(NoiseModel, robustFunctionL2WithDeadZone)


@ -0,0 +1,73 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% GTSAM Copyright 2010, Georgia Tech Research Corporation,
% Atlanta, Georgia 30332-0415
% All Rights Reserved
% Authors: Frank Dellaert, et al. (see THANKS for the full author list)
%
% See LICENSE for the license information
%
% @brief Plot visualizations of residuals, residual derivatives, and weights for the various mEstimators.
% @author Varun Agrawal
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% import gtsam.*
close all;
x = linspace(-10, 10, 1000);
%% Define all the mEstimator models and plot them
c = 1.3998;
fairNoiseModel = gtsam.noiseModel.mEstimator.Fair(c);
plot_m_estimator(x, fairNoiseModel, 'Fair', 1, 'fair.png')
c = 1.345;
huberNoiseModel = gtsam.noiseModel.mEstimator.Huber(c);
plot_m_estimator(x, huberNoiseModel, 'Huber', 2, 'huber.png')
c = 0.1;
cauchyNoiseModel = gtsam.noiseModel.mEstimator.Cauchy(c);
plot_m_estimator(x, cauchyNoiseModel, 'Cauchy', 3, 'cauchy.png')
c = 1.0;
gemanmcclureNoiseModel = gtsam.noiseModel.mEstimator.GemanMcClure(c);
plot_m_estimator(x, gemanmcclureNoiseModel, 'Geman-McClure', 4, 'gemanmcclure.png')
c = 2.9846;
welschNoiseModel = gtsam.noiseModel.mEstimator.Welsch(c);
plot_m_estimator(x, welschNoiseModel, 'Welsch', 5, 'welsch.png')
c = 4.6851;
tukeyNoiseModel = gtsam.noiseModel.mEstimator.Tukey(c);
plot_m_estimator(x, tukeyNoiseModel, 'Tukey', 6, 'tukey.png')
%% Plot rho, psi and weights of the mEstimator.
function plot_m_estimator(x, model, plot_title, fig_id, filename)
w = zeros(size(x));
rho = zeros(size(x));
for i = 1:size(x, 2)
w(i) = model.weight(x(i));
rho(i) = model.residual(x(i));
end
psi = w .* x;
figure(fig_id);
subplot(3, 1, 1);
plot(x, rho, 'LineWidth',2);
title('rho function');
xlim([-5, 5]);
subplot(3, 1, 2);
plot(x, psi, 'LineWidth',2);
title('influence function');
xlim([-5, 5]);
subplot(3, 1, 3);
plot(x, w, 'LineWidth',2);
title('weight function');
xlim([-5, 5]);
sgtitle(plot_title, 'FontSize', 26);
saveas(figure(fig_id), filename);
end


@ -159,10 +159,10 @@ namespace example {
namespace impl {
typedef boost::shared_ptr<NonlinearFactor> shared_nlf;
static SharedDiagonal sigma1_0 = noiseModel::Isotropic::Sigma(2,1.0);
static SharedDiagonal sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1);
static SharedDiagonal sigma0_2 = noiseModel::Isotropic::Sigma(2,0.2);
static SharedDiagonal constraintModel = noiseModel::Constrained::All(2);
static SharedDiagonal kSigma1_0 = noiseModel::Isotropic::Sigma(2,1.0);
static SharedDiagonal kSigma0_1 = noiseModel::Isotropic::Sigma(2,0.1);
static SharedDiagonal kSigma0_2 = noiseModel::Isotropic::Sigma(2,0.2);
static SharedDiagonal kConstrainedModel = noiseModel::Constrained::All(2);
static const Key _l1_=0, _x1_=1, _x2_=2;
static const Key _x_=0, _y_=1, _z_=2;
@ -170,40 +170,43 @@ static const Key _x_=0, _y_=1, _z_=2;
/* ************************************************************************* */
inline boost::shared_ptr<const NonlinearFactorGraph> sharedNonlinearFactorGraph() {
inline boost::shared_ptr<const NonlinearFactorGraph>
sharedNonlinearFactorGraph(const SharedNoiseModel &noiseModel1 = impl::kSigma0_1,
const SharedNoiseModel &noiseModel2 = impl::kSigma0_2) {
using namespace impl;
using symbol_shorthand::X;
using symbol_shorthand::L;
using symbol_shorthand::X;
// Create
boost::shared_ptr<NonlinearFactorGraph> nlfg(
new NonlinearFactorGraph);
boost::shared_ptr<NonlinearFactorGraph> nlfg(new NonlinearFactorGraph);
// prior on x1
Point2 mu(0,0);
shared_nlf f1(new simulated2D::Prior(mu, sigma0_1, X(1)));
Point2 mu(0, 0);
shared_nlf f1(new simulated2D::Prior(mu, noiseModel1, X(1)));
nlfg->push_back(f1);
// odometry between x1 and x2
Point2 z2(1.5, 0);
shared_nlf f2(new simulated2D::Odometry(z2, sigma0_1, X(1), X(2)));
shared_nlf f2(new simulated2D::Odometry(z2, noiseModel1, X(1), X(2)));
nlfg->push_back(f2);
// measurement between x1 and l1
Point2 z3(0, -1);
shared_nlf f3(new simulated2D::Measurement(z3, sigma0_2, X(1), L(1)));
shared_nlf f3(new simulated2D::Measurement(z3, noiseModel2, X(1), L(1)));
nlfg->push_back(f3);
// measurement between x2 and l1
Point2 z4(-1.5, -1.);
shared_nlf f4(new simulated2D::Measurement(z4, sigma0_2, X(2), L(1)));
shared_nlf f4(new simulated2D::Measurement(z4, noiseModel2, X(2), L(1)));
nlfg->push_back(f4);
return nlfg;
}
/* ************************************************************************* */
inline NonlinearFactorGraph createNonlinearFactorGraph() {
return *sharedNonlinearFactorGraph();
inline NonlinearFactorGraph
createNonlinearFactorGraph(const SharedNoiseModel &noiseModel1 = impl::kSigma0_1,
const SharedNoiseModel &noiseModel2 = impl::kSigma0_2) {
return *sharedNonlinearFactorGraph(noiseModel1, noiseModel2);
}
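The point of threading the noise models through these helpers is that tests can now build the same example graph with a robust model in place of the fixed isotropic ones. A hypothetical call site (Robust::Create and the Huber kernel come from gtsam/linear/NoiseModel.h; the include path and function name below are illustrative, not part of this change):

#include <gtsam/linear/NoiseModel.h>
#include <tests/smallExample.h>  // assumed location of this helper header

// Build the standard 2D example graph, but wrap both measurement models
// in a Huber robust kernel so outlier measurements are down-weighted.
gtsam::NonlinearFactorGraph robustExampleGraph() {
  using namespace gtsam;
  auto huber = noiseModel::mEstimator::Huber::Create(1.345);
  SharedNoiseModel robust1 = noiseModel::Robust::Create(huber, example::impl::kSigma0_1);
  SharedNoiseModel robust2 = noiseModel::Robust::Create(huber, example::impl::kSigma0_2);
  return example::createNonlinearFactorGraph(robust1, robust2);
}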
/* ************************************************************************* */
@ -372,19 +375,19 @@ inline std::pair<NonlinearFactorGraph, Values> createNonlinearSmoother(int T) {
// prior on x1
Point2 x1(1.0, 0.0);
shared_nlf prior(new simulated2D::Prior(x1, sigma1_0, X(1)));
shared_nlf prior(new simulated2D::Prior(x1, kSigma1_0, X(1)));
nlfg.push_back(prior);
poses.insert(X(1), x1);
for (int t = 2; t <= T; t++) {
// odometry between x_t and x_{t-1}
Point2 odo(1.0, 0.0);
shared_nlf odometry(new simulated2D::Odometry(odo, sigma1_0, X(t - 1), X(t)));
shared_nlf odometry(new simulated2D::Odometry(odo, kSigma1_0, X(t - 1), X(t)));
nlfg.push_back(odometry);
// measurement on x_t is like perfect GPS
Point2 xt(t, 0);
shared_nlf measurement(new simulated2D::Prior(xt, sigma1_0, X(t)));
shared_nlf measurement(new simulated2D::Prior(xt, kSigma1_0, X(t)));
nlfg.push_back(measurement);
// initial estimate
@ -412,7 +415,7 @@ inline GaussianFactorGraph createSimpleConstraintGraph() {
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, kSigma0_1));
// create binary constraint factor
// between _x_ and _y_, that is going to be the only factor on _y_
@ -422,7 +425,7 @@ inline GaussianFactorGraph createSimpleConstraintGraph() {
Matrix Ay1 = I_2x2 * -1;
Vector b2 = Vector2(0.0, 0.0);
JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
constraintModel));
kConstrainedModel));
// construct the graph
GaussianFactorGraph fg;
@ -453,8 +456,8 @@ inline GaussianFactorGraph createSingleConstraintGraph() {
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
//GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
//GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, kSigma0_1->Whiten(Ax), kSigma0_1->whiten(b1), kSigma0_1));
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, kSigma0_1));
// create binary constraint factor
// between _x_ and _y_, that is going to be the only factor on _y_
@ -468,7 +471,7 @@ inline GaussianFactorGraph createSingleConstraintGraph() {
Matrix Ay1 = I_2x2 * 10;
Vector b2 = Vector2(1.0, 2.0);
JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
constraintModel));
kConstrainedModel));
// construct the graph
GaussianFactorGraph fg;
@ -493,7 +496,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
// unary factor 1
Matrix A = I_2x2;
Vector b = Vector2(-2.0, 2.0);
JacobianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, sigma0_1));
JacobianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, kSigma0_1));
// constraint 1
Matrix A11(2, 2);
@ -512,7 +515,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
b1(0) = 1.0;
b1(1) = 2.0;
JacobianFactor::shared_ptr lc1(new JacobianFactor(_x_, A11, _y_, A12, b1,
constraintModel));
kConstrainedModel));
// constraint 2
Matrix A21(2, 2);
@ -531,7 +534,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
b2(0) = 3.0;
b2(1) = 4.0;
JacobianFactor::shared_ptr lc2(new JacobianFactor(_x_, A21, _z_, A22, b2,
constraintModel));
kConstrainedModel));
// construct the graph
GaussianFactorGraph fg;