Move mEstimator into separate file

release/4.3a0
Francesco Papi 2019-11-12 10:27:34 -08:00
parent 4ac78953ae
commit 73542d980a
4 changed files with 804 additions and 741 deletions

View File

@@ -0,0 +1,423 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LossFunctions.cpp
* @date Jan 13, 2010
* @author Richard Roberts
* @author Frank Dellaert
*/
#include <gtsam/linear/LossFunctions.h>
using namespace std;
namespace gtsam {
namespace noiseModel {
/* ************************************************************************* */
// M-Estimator
/* ************************************************************************* */
namespace mEstimator {
Vector Base::weight(const Vector& error) const {
const size_t n = error.rows();
Vector w(n);
for (size_t i = 0; i < n; ++i)
w(i) = weight(error(i));
return w;
}
// The following functions re-weight block matrices and an error vector
// according to the weight implementation and the chosen ReweightScheme
void Base::reweight(Vector& error) const {
if (reweight_ == Block) {
const double w = sqrtWeight(error.norm());
error *= w;
} else {
error.array() *= weight(error).cwiseSqrt().array();
}
}
// Reweight n block matrices with one error vector
void Base::reweight(vector<Matrix> &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
for(Matrix& Aj: A) {
Aj *= w;
}
error *= w;
}
else {
const Vector W = sqrtWeight(error);
for(Matrix& Aj: A) {
vector_scale_inplace(W,Aj);
}
error = W.cwiseProduct(error);
}
}
// Reweight one block matrix with one error vector
void Base::reweight(Matrix &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A);
error = W.cwiseProduct(error);
}
}
// Reweight two block matrices with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
error = W.cwiseProduct(error);
}
}
// Reweight three block matrices with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
A3 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
vector_scale_inplace(W,A3);
error = W.cwiseProduct(error);
}
}
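/* Usage sketch (illustrative, not part of the original sources; A and b are
 * hypothetical placeholders for a factor's whitened Jacobian block and error
 * vector):
 *
 *   auto rho = noiseModel::mEstimator::Huber::Create(1.345);
 *   rho->reweight(A, b);  // Block:  every row scaled by sqrtWeight(b.norm())
 *                         // Scalar: row i scaled by sqrtWeight(b(i))
 *
 * Either way the subsequent linear solve sees sqrt(w) * [A | b], which is the
 * update used in iteratively re-weighted least squares (IRLS).
 */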
/* ************************************************************************* */
// Null model
/* ************************************************************************* */
void Null::print(const std::string &s="") const
{ cout << s << "null ()" << endl; }
Null::shared_ptr Null::Create()
{ return shared_ptr(new Null()); }
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
if (c_ <= 0) {
throw runtime_error("mEstimator Fair takes only positive double in constructor.");
}
}
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log(1 + normalizedError));
}
void Fair::print(const std::string &s="") const
{ cout << s << "fair (" << c_ << ")" << endl; }
bool Fair::equals(const Base &expected, double tol) const {
const Fair* p = dynamic_cast<const Fair*> (&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_ ) < tol;
}
Fair::shared_ptr Fair::Create(double c, const ReweightScheme reweight)
{ return shared_ptr(new Fair(c, reweight)); }
/* ************************************************************************* */
// Huber
/* ************************************************************************* */
Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator Huber takes only positive double in constructor.");
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
}
void Huber::print(const std::string &s="") const {
cout << s << "huber (" << k_ << ")" << endl;
}
bool Huber::equals(const Base &expected, double tol) const {
const Huber* p = dynamic_cast<const Huber*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
Huber::shared_ptr Huber::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Huber(c, reweight));
}
/* ************************************************************************* */
// Cauchy
/* ************************************************************************* */
Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), ksquared_(k * k) {
if (k <= 0) {
throw runtime_error("mEstimator Cauchy takes only positive double in constructor.");
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
const double xc2 = error / k_;
const double val = std::log(1 + (xc2*xc2));
return ksquared_ * val * 0.5;
}
void Cauchy::print(const std::string &s="") const {
cout << s << "cauchy (" << k_ << ")" << endl;
}
bool Cauchy::equals(const Base &expected, double tol) const {
const Cauchy* p = dynamic_cast<const Cauchy*>(&expected);
if (p == NULL) return false;
return std::abs(ksquared_ - p->ksquared_) < tol;
}
Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Cauchy(c, reweight));
}
/* ************************************************************************* */
// Tukey
/* ************************************************************************* */
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
if (c <= 0) {
throw runtime_error("mEstimator Tukey takes only positive double in constructor.");
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double xc2 = error*error/csquared_;
const double t = (1 - xc2)*(1 - xc2)*(1 - xc2);
return csquared_ * (1 - t) / 6.0;
} else {
return csquared_ / 6.0;
}
}
void Tukey::print(const std::string &s="") const {
std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
}
bool Tukey::equals(const Base &expected, double tol) const {
const Tukey* p = dynamic_cast<const Tukey*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Tukey(c, reweight));
}
/* ************************************************************************* */
// Welsch
/* ************************************************************************* */
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * (1 - std::exp(-xc2) );
}
void Welsch::print(const std::string &s="") const {
std::cout << s << ": Welsch (" << c_ << ")" << std::endl;
}
bool Welsch::equals(const Base &expected, double tol) const {
const Welsch* p = dynamic_cast<const Welsch*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsch(c, reweight));
}
/* ************************************************************************* */
// GemanMcClure
/* ************************************************************************* */
GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double GemanMcClure::weight(double error) const {
const double c2 = c_*c_;
const double c4 = c2*c2;
const double c2error = c2 + error*error;
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
}
void GemanMcClure::print(const std::string &s="") const {
std::cout << s << ": Geman-McClure (" << c_ << ")" << std::endl;
}
bool GemanMcClure::equals(const Base &expected, double tol) const {
const GemanMcClure* p = dynamic_cast<const GemanMcClure*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
GemanMcClure::shared_ptr GemanMcClure::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new GemanMcClure(c, reweight));
}
/* ************************************************************************* */
// DCS
/* ************************************************************************* */
DCS::DCS(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double DCS::weight(double error) const {
const double e2 = error*error;
if (e2 > c_)
{
const double w = 2.0*c_/(c_ + e2);
return w*w;
}
return 1.0;
}
double DCS::residual(double error) const {
// This is Eq. 9 from (Agarwal13icra), simplified by cancelling terms.
const double e2 = error*error;
const double e4 = e2*e2;
const double c2 = c_*c_;
return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_));
}
void DCS::print(const std::string &s="") const {
std::cout << s << ": DCS (" << c_ << ")" << std::endl;
}
bool DCS::equals(const Base &expected, double tol) const {
const DCS* p = dynamic_cast<const DCS*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new DCS(c, reweight));
}
/* ************************************************************************* */
// L2WithDeadZone
/* ************************************************************************* */
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
: Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
}
}
double L2WithDeadZone::weight(double error) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
void L2WithDeadZone::print(const std::string &s="") const {
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
}
bool L2WithDeadZone::equals(const Base &expected, double tol) const {
const L2WithDeadZone* p = dynamic_cast<const L2WithDeadZone*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme reweight) {
return shared_ptr(new L2WithDeadZone(k, reweight));
}
} // namespace mEstimator
} // namespace noiseModel
} // namespace gtsam

View File

@@ -0,0 +1,379 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file LossFunctions.h
* @date Jan 13, 2010
* @author Richard Roberts
* @author Frank Dellaert
*/
#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Testable.h>
#include <gtsam/dllexport.h>
#include <boost/serialization/extended_type_info.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/optional.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/singleton.hpp>
namespace gtsam {
namespace noiseModel {
// clang-format off
/**
* The mEstimator namespace contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name        Symbol           Least-Squares   L1-norm   Huber
* Residual    \rho(x)          0.5*x^2         |x|       0.5*x^2 if |x|<k, k*(|x| - 0.5*k) otherwise
* Derivative  \phi(x)          x               sgn(x)    x if |x|<k, k sgn(x) otherwise
* Weight      w(x)=\phi(x)/x   1               1/|x|     1 if |x|<k, k/|x| otherwise
*
* With these definitions, D(\rho(x), p) = \phi(x) D(x,p) = w(x) x D(x,p) = w(x) D(L2(x), p),
* and hence we can solve the equivalent weighted least squares problem \sum w(r_i) r_i^2
*
* Each M-estimator in the mEstimator namespace simply implements the above functions.
*/
// clang-format on
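/* Example (illustrative, not part of the original header): the relation
 * w(x) = \phi(x)/x can be checked numerically for any estimator below, e.g.
 * Huber with threshold k = 1.345 evaluated at x = 3:
 *
 *   auto huber = noiseModel::mEstimator::Huber::Create(1.345);
 *   double rho = huber->residual(3.0);  // k*(|x| - 0.5*k) = 1.345*2.3275 ~= 3.13
 *   double w   = huber->weight(3.0);    // k/|x| = 1.345/3 ~= 0.448, i.e. phi(x)/x with phi(x) = k*sgn(x)
 */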
namespace mEstimator {
//---------------------------------------------------------------------------------------
class GTSAM_EXPORT Base {
public:
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
virtual ~Base() {}
/*
* This method is responsible for returning the total penalty for a given
* amount of error. For example, this method is responsible for implementing
* the quadratic function for an L2 penalty, the absolute value function for
* an L1 penalty, etc.
*
* TODO(mikebosse): When the residual function has as input the norm of the
* residual vector, then it prevents implementations of asymmetric loss
* functions. It would be better for this function to accept the vector and
* internally call the norm if necessary.
*/
virtual double residual(double error) const { return 0; };
/*
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the residual function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
*/
virtual double weight(double error) const = 0;
virtual void print(const std::string &s) const = 0;
virtual bool equals(const Base &expected, double tol = 1e-8) const = 0;
double sqrtWeight(double error) const { return std::sqrt(weight(error)); }
/** produce a weight vector according to an error vector and the implemented
* robust function */
Vector weight(const Vector &error) const;
/** square root version of the weight function */
Vector sqrtWeight(const Vector &error) const {
return weight(error).cwiseSqrt();
}
/** reweight block matrices and a vector according to their weight
* implementation */
void reweight(Vector &error) const;
void reweight(std::vector<Matrix> &A, Vector &error) const;
void reweight(Matrix &A, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const;
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_NVP(reweight_);
}
};
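/* A minimal sketch (not GTSAM code; the class name is purely illustrative) of
 * how a user-defined loss plugs into this hierarchy: derive from Base and make
 * weight() the derivative of residual() divided by the error, w(x) = phi(x)/x.
 *
 *   class QuadraticExample : public Base {
 *    public:
 *     QuadraticExample(const ReweightScheme reweight = Block) : Base(reweight) {}
 *     double weight(double [slash]*error*[slash]) const override { return 1.0; }  // phi(x)/x = x/x
 *     double residual(double error) const override { return 0.5 * error * error; }
 *     void print(const std::string &s) const override { std::cout << s << "quadratic ()" << std::endl; }
 *     bool equals(const Base &expected, double [slash]*tol*[slash]) const override {
 *       return dynamic_cast<const QuadraticExample*>(&expected) != nullptr;
 *     }
 *   };
 *
 * (Here "[slash]" stands for the '/' character, written this way only so the
 * unused-parameter comments do not terminate this surrounding comment block.)
 */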
/// Null class is not robust: it applies unit weights, i.e. it keeps the plain Gaussian (quadratic) loss.
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create();
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
};
/// Fair implements the "Fair" robust error model (Zhang97ivc)
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
public:
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// Huber implements the "Huber" robust error model (Zhang97ivc)
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
}
};
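/* Typical use (illustrative; Robust and Isotropic are declared in
 * NoiseModel.h, which includes this header): wrap the loss around a Gaussian
 * noise model and pass the result to a factor.
 *
 *   auto loss     = noiseModel::mEstimator::Huber::Create(1.345);
 *   auto gaussian = noiseModel::Isotropic::Sigma(3, 0.1);
 *   auto robust   = noiseModel::Robust::Create(loss, gaussian);
 */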
/// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed
/// by:
/// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
/// Information Technology, Karlsruhe, Germany.
/// oberlaender@fzi.de
/// Thanks Jan!
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
public:
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
}
};
/// Tukey implements the "Tukey" robust error model (Zhang97ivc)
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// Welsch implements the "Welsch" robust error model (Zhang97ivc)
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
// Welsh implements the "Welsch" robust error model (Zhang97ivc)
// This was misspelled in previous versions of gtsam and should be
// removed in the future.
using Welsh = Welsch;
/// @}
#endif
/// GemanMcClure implements the "Geman-McClure" robust error model
/// (Zhang97ivc).
///
/// Note that Geman-McClure weight function uses the parameter c == 1.0,
/// but here it's allowed to use different values, so we actually have
/// the generalized Geman-McClure from (Agarwal15phd).
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
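/* Illustrative numbers (not from the original header) for the default c = 1:
 *
 *   GemanMcClure gm(1.0);
 *   gm.weight(0.0);  // 1.0
 *   gm.weight(1.0);  // 0.25 = 1/(1+1)^2
 *   gm.weight(3.0);  // 0.01 = 1/(1+9)^2
 *
 * showing the rapid down-weighting of large errors that characterizes
 * Geman-McClure compared to, e.g., Huber's k/|x| tail.
 */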
/// DCS implements the Dynamic Covariance Scaling robust error model
/// from the paper Robust Map Optimization (Agarwal13icra).
///
/// Under the special condition of the parameter c == 1.0 and not
/// forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
DCS(double c = 1.0, const ReweightScheme reweight = Block);
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
/// width 2*k, centered at the origin. The resulting penalty within the dead
/// zone is always zero, and grows quadratically outside the dead zone. In this
/// sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
/// robust to outliers. This penalty can be used to create barrier functions in
/// a general way.
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
}
};
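/* Behaviour sketch (illustrative, not part of the original header) for the
 * default half-width k = 1:
 *
 *   L2WithDeadZone dz(1.0);
 *   dz.residual(0.5);  // 0.0, inside the dead zone [-k, k]
 *   dz.residual(2.0);  // 0.5*(|x|-k)^2 = 0.5
 *   dz.weight(2.0);    // (|x|-k)/x = 0.5, i.e. phi(x)/x outside the dead zone
 */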
} // namespace mEstimator
} // namespace noiseModel
} // namespace gtsam

View File

@@ -616,403 +616,6 @@ void Unit::print(const std::string& name) const {
cout << name << "unit (" << dim_ << ") " << endl;
}
/* ************************************************************************* */
// M-Estimator
/* ************************************************************************* */
namespace mEstimator {
Vector Base::weight(const Vector& error) const {
const size_t n = error.rows();
Vector w(n);
for (size_t i = 0; i < n; ++i)
w(i) = weight(error(i));
return w;
}
// The following functions re-weight block matrices and an error vector
// according to the weight implementation and the chosen ReweightScheme
void Base::reweight(Vector& error) const {
if (reweight_ == Block) {
const double w = sqrtWeight(error.norm());
error *= w;
} else {
error.array() *= weight(error).cwiseSqrt().array();
}
}
// Reweight n block matrices with one error vector
void Base::reweight(vector<Matrix> &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
for(Matrix& Aj: A) {
Aj *= w;
}
error *= w;
}
else {
const Vector W = sqrtWeight(error);
for(Matrix& Aj: A) {
vector_scale_inplace(W,Aj);
}
error = W.cwiseProduct(error);
}
}
// Reweight one block matrix with one error vector
void Base::reweight(Matrix &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A);
error = W.cwiseProduct(error);
}
}
// Reweight two block matrices with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
error = W.cwiseProduct(error);
}
}
// Reweight three block matrices with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
A3 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
vector_scale_inplace(W,A3);
error = W.cwiseProduct(error);
}
}
/* ************************************************************************* */
// Null model
/* ************************************************************************* */
void Null::print(const std::string &s="") const
{ cout << s << "null ()" << endl; }
Null::shared_ptr Null::Create()
{ return shared_ptr(new Null()); }
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
if (c_ <= 0) {
throw runtime_error("mEstimator Fair takes only positive double in constructor.");
}
}
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log(1 + normalizedError));
}
void Fair::print(const std::string &s="") const
{ cout << s << "fair (" << c_ << ")" << endl; }
bool Fair::equals(const Base &expected, double tol) const {
const Fair* p = dynamic_cast<const Fair*> (&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_ ) < tol;
}
Fair::shared_ptr Fair::Create(double c, const ReweightScheme reweight)
{ return shared_ptr(new Fair(c, reweight)); }
/* ************************************************************************* */
// Huber
/* ************************************************************************* */
Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator Huber takes only positive double in constructor.");
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
}
void Huber::print(const std::string &s="") const {
cout << s << "huber (" << k_ << ")" << endl;
}
bool Huber::equals(const Base &expected, double tol) const {
const Huber* p = dynamic_cast<const Huber*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
Huber::shared_ptr Huber::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Huber(c, reweight));
}
/* ************************************************************************* */
// Cauchy
/* ************************************************************************* */
Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), ksquared_(k * k) {
if (k <= 0) {
throw runtime_error("mEstimator Cauchy takes only positive double in constructor.");
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
const double xc2 = error / k_;
const double val = std::log(1 + (xc2*xc2));
return ksquared_ * val * 0.5;
}
void Cauchy::print(const std::string &s="") const {
cout << s << "cauchy (" << k_ << ")" << endl;
}
bool Cauchy::equals(const Base &expected, double tol) const {
const Cauchy* p = dynamic_cast<const Cauchy*>(&expected);
if (p == NULL) return false;
return std::abs(ksquared_ - p->ksquared_) < tol;
}
Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Cauchy(c, reweight));
}
/* ************************************************************************* */
// Tukey
/* ************************************************************************* */
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
if (c <= 0) {
throw runtime_error("mEstimator Tukey takes only positive double in constructor.");
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double xc2 = error*error/csquared_;
const double t = (1 - xc2)*(1 - xc2)*(1 - xc2);
return csquared_ * (1 - t) / 6.0;
} else {
return csquared_ / 6.0;
}
}
void Tukey::print(const std::string &s="") const {
std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
}
bool Tukey::equals(const Base &expected, double tol) const {
const Tukey* p = dynamic_cast<const Tukey*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Tukey(c, reweight));
}
/* ************************************************************************* */
// Welsch
/* ************************************************************************* */
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * (1 - std::exp(-xc2) );
}
void Welsch::print(const std::string &s="") const {
std::cout << s << ": Welsch (" << c_ << ")" << std::endl;
}
bool Welsch::equals(const Base &expected, double tol) const {
const Welsch* p = dynamic_cast<const Welsch*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsch(c, reweight));
}
/* ************************************************************************* */
// GemanMcClure
/* ************************************************************************* */
GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double GemanMcClure::weight(double error) const {
const double c2 = c_*c_;
const double c4 = c2*c2;
const double c2error = c2 + error*error;
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
}
void GemanMcClure::print(const std::string &s="") const {
std::cout << s << ": Geman-McClure (" << c_ << ")" << std::endl;
}
bool GemanMcClure::equals(const Base &expected, double tol) const {
const GemanMcClure* p = dynamic_cast<const GemanMcClure*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
GemanMcClure::shared_ptr GemanMcClure::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new GemanMcClure(c, reweight));
}
/* ************************************************************************* */
// DCS
/* ************************************************************************* */
DCS::DCS(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double DCS::weight(double error) const {
const double e2 = error*error;
if (e2 > c_)
{
const double w = 2.0*c_/(c_ + e2);
return w*w;
}
return 1.0;
}
double DCS::residual(double error) const {
// This is Eq. 9 from (Agarwal13icra), simplified by cancelling terms.
const double e2 = error*error;
const double e4 = e2*e2;
const double c2 = c_*c_;
return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_));
}
void DCS::print(const std::string &s="") const {
std::cout << s << ": DCS (" << c_ << ")" << std::endl;
}
bool DCS::equals(const Base &expected, double tol) const {
const DCS* p = dynamic_cast<const DCS*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new DCS(c, reweight));
}
/* ************************************************************************* */
// L2WithDeadZone
/* ************************************************************************* */
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
: Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
}
}
double L2WithDeadZone::weight(double error) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
void L2WithDeadZone::print(const std::string &s="") const {
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
}
bool L2WithDeadZone::equals(const Base &expected, double tol) const {
const L2WithDeadZone* p = dynamic_cast<const L2WithDeadZone*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme reweight) {
return shared_ptr(new L2WithDeadZone(k, reweight));
}
} // namespace mEstimator
/* ************************************************************************* */
// Robust
/* ************************************************************************* */

View File

@@ -21,6 +21,7 @@
#include <gtsam/base/Testable.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/dllexport.h>
#include <gtsam/linear/LossFunctions.h>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/extended_type_info.hpp>
@@ -39,6 +40,7 @@ namespace gtsam {
class Constrained;
class Isotropic;
class Unit;
class RobustModel;
//---------------------------------------------------------------------------------------
@@ -626,350 +628,6 @@ namespace gtsam {
}
};
/**
* The mEstimator namespace contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name        Symbol           Least-Squares   L1-norm   Huber
* Residual    \rho(x)          0.5*x^2         |x|       0.5*x^2 if |x|<k, k*(|x| - 0.5*k) otherwise
* Derivative  \phi(x)          x               sgn(x)    x if |x|<k, k sgn(x) otherwise
* Weight      w(x)=\phi(x)/x   1               1/|x|     1 if |x|<k, k/|x| otherwise
*
* With these definitions, D(\rho(x), p) = \phi(x) D(x,p) = w(x) x D(x,p) = w(x) D(L2(x), p),
* and hence we can solve the equivalent weighted least squares problem \sum w(r_i) r_i^2
*
* Each M-estimator in the mEstimator namespace simply implements the above functions.
*/
namespace mEstimator {
//---------------------------------------------------------------------------------------
class GTSAM_EXPORT Base {
public:
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block):reweight_(reweight) {}
virtual ~Base() {}
/*
* This method is responsible for returning the total penalty for a given amount of error.
* For example, this method is responsible for implementing the quadratic function for an
* L2 penalty, the absolute value function for an L1 penalty, etc.
*
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
* implement a residual function, and GTSAM calls the weight function to evaluate the
* total penalty, rather than calling the residual function. The weight function should be
* used during iteratively reweighted least squares optimization, but should not be used to
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
* both a weight and a residual function, and for GTSAM to call the residual function when
* evaluating the total penalty. But for now, I'm leaving this residual method with a
* default implementation (not pure virtual), so the existing mEstimators can inherit
* this fallback behavior.
*/
virtual double residual(double error) const { return 0; };
/*
* This method is responsible for returning the weight function for a given amount of error.
* The weight function is related to the analytic derivative of the residual function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with robust penalties
* using iteratively re-weighted least squares.
*/
virtual double weight(double error) const = 0;
virtual void print(const std::string &s) const = 0;
virtual bool equals(const Base& expected, double tol=1e-8) const = 0;
double sqrtWeight(double error) const {
return std::sqrt(weight(error));
}
/** produce a weight vector according to an error vector and the implemented
* robust function */
Vector weight(const Vector &error) const;
/** square root version of the weight function */
Vector sqrtWeight(const Vector &error) const {
return weight(error).cwiseSqrt();
}
/** reweight block matrices and a vector according to their weight implementation */
void reweight(Vector &error) const;
void reweight(std::vector<Matrix> &A, Vector &error) const;
void reweight(Matrix &A, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_NVP(reweight_);
}
};
/// Null class is not robust: it applies unit weights, i.e. it keeps the plain Gaussian (quadratic) loss.
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base& /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create() ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
};
/// Fair implements the "Fair" robust error model (Zhang97ivc)
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
public:
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
/// Huber implements the "Huber" robust error model (Zhang97ivc)
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(k_);
}
};
/// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed by:
/// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
/// Information Technology, Karlsruhe, Germany.
/// oberlaender@fzi.de
/// Thanks Jan!
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
public:
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(k_);
}
};
/// Tukey implements the "Tukey" robust error model (Zhang97ivc)
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
/// Welsch implements the "Welsch" robust error model (Zhang97ivc)
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
// Welsh implements the "Welsch" robust error model (Zhang97ivc)
// This was misspelled in previous versions of gtsam and should be
// removed in the future.
using Welsh = Welsch;
#endif
/// GemanMcClure implements the "Geman-McClure" robust error model
/// (Zhang97ivc).
///
/// Note that Geman-McClure weight function uses the parameter c == 1.0,
/// but here it's allowed to use different values, so we actually have
/// the generalized Geman-McClure from (Agarwal15phd).
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol=1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
/// DCS implements the Dynamic Covariance Scaling robust error model
/// from the paper Robust Map Optimization (Agarwal13icra).
///
/// Under the special condition of the parameter c == 1.0 and not
/// forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
DCS(double c = 1.0, const ReweightScheme reweight = Block);
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of width 2*k,
/// centered at the origin. The resulting penalty within the dead zone is always zero, and
/// grows quadratically outside the dead zone. In this sense, the L2WithDeadZone penalty is
/// "robust to inliers", rather than being robust to outliers. This penalty can be used to
/// create barrier functions in a general way.
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(k_);
}
};
} ///\namespace mEstimator
/**
* Base class for robust error models
* The robust M-estimators above simply tell us how to re-weight the residual, and are