Merge branch 'develop' into feature/rotation_group

release/4.3a0
Fan Jiang 2019-12-21 20:56:28 -08:00
commit d1817eb7c9
22 changed files with 968 additions and 854 deletions

View File

@ -1,7 +1,7 @@
#!/bin/bash
# common tasks before either build or test
function prepare ()
function configure()
{
set -e # Make sure any error makes the script return an error code
set -x # echo
@ -14,21 +14,24 @@ function prepare ()
rm -fr $BUILD_DIR || true
mkdir $BUILD_DIR && cd $BUILD_DIR
if [ -z "$CMAKE_BUILD_TYPE" ]; then
CMAKE_BUILD_TYPE=Debug
fi
if [ -z "$GTSAM_ALLOW_DEPRECATED_SINCE_V4" ]; then
GTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
fi
if [ ! -z "$GCC_VERSION" ]; then
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 60 \
--slave /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION
sudo update-alternatives --set gcc /usr/bin/gcc-$GCC_VERSION
export CC=gcc-$GCC_VERSION
export CXX=g++-$GCC_VERSION
fi
# GTSAM_BUILD_WITH_MARCH_NATIVE=OFF: to avoid crashes in builder VMs
cmake $SOURCE_DIR \
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Debug} \
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
-DCMAKE_VERBOSE_MAKEFILE=ON
}
# common tasks after either build or test
function finish ()
{
@ -41,18 +44,12 @@ function finish ()
# compile the code with the intent of populating the cache
function build ()
{
prepare
export GTSAM_BUILD_EXAMPLES_ALWAYS=ON
export GTSAM_BUILD_TESTS=OFF
cmake $SOURCE_DIR \
-DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \
-DGTSAM_BUILD_TESTS=OFF \
-DGTSAM_BUILD_UNSTABLE=$GTSAM_BUILD_UNSTABLE \
-DGTSAM_USE_QUATERNIONS=$GTSAM_USE_QUATERNIONS \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=ON \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=$GTSAM_ALLOW_DEPRECATED_SINCE_V4
configure
# Actual build:
VERBOSE=1 make -j2
make -j2
finish
}
@ -60,15 +57,10 @@ function build ()
# run the tests
function test ()
{
prepare
export GTSAM_BUILD_EXAMPLES_ALWAYS=OFF
export GTSAM_BUILD_TESTS=ON
cmake $SOURCE_DIR \
-DCMAKE_BUILD_TYPE=$CMAKE_BUILD_TYPE \
-DGTSAM_BUILD_TESTS=ON \
-DGTSAM_BUILD_UNSTABLE=$GTSAM_BUILD_UNSTABLE \
-DGTSAM_USE_QUATERNIONS=$GTSAM_USE_QUATERNIONS \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
-DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
configure
# Actual build:
make -j2 check

View File

@ -7,11 +7,12 @@ addons:
apt:
sources:
- ubuntu-toolchain-r-test
- sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main'
key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
packages:
- g++-8
- clang-3.8
- build-essential
- pkg-config
- g++-9
- clang-9
- build-essential pkg-config
- cmake
- libpython-dev python-numpy
- libboost-all-dev
@ -29,8 +30,14 @@ stages:
- test
- special
env:
global:
- MAKEFLAGS="-j2"
- CCACHE_SLOPPINESS=pch_defines,time_macros
# Compile stage without building examples/tests to populate the caches.
jobs:
# -------- STAGE 1: COMPILE -----------
include:
# on Mac, GCC
- stage: compile
@ -69,53 +76,49 @@ jobs:
- stage: compile
os: linux
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -b
- stage: compile
os: linux
compiler: clang
env: CMAKE_BUILD_TYPE=Release
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -b
# on Linux, with deprecated ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON
script: bash .travis.sh -b
# -------- STAGE 2: TESTS -----------
# on Mac, GCC
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: osx
compiler: clang
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
- stage: test
os: linux
compiler: gcc
env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
script: bash .travis.sh -t
- stage: test
os: linux
compiler: clang
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
script: bash .travis.sh -t
# on Linux, with quaternions ON to make sure that path still compiles/tests
- stage: special
os: linux
compiler: clang
env: CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON
env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON
script: bash .travis.sh -t
# Matrix configuration:
os:
- osx
- linux
compiler:
- gcc
- clang
env:
global:
- MAKEFLAGS="-j2"
- CCACHE_SLOPPINESS=pch_defines,time_macros
- GTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF
- GTSAM_USE_QUATERNIONS=OFF
- GTSAM_BUILD_UNSTABLE=ON
matrix:
- CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- CMAKE_BUILD_TYPE=Release
script:
- bash .travis.sh -t
matrix:
exclude:
# Exclude g++ debug on Linux as it consistently times out
- os: linux
compiler: gcc
env : CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
# Exclude clang on Linux/clang in release until issue #57 is solved
- os: linux
compiler: clang
env : CMAKE_BUILD_TYPE=Release

View File

@ -434,7 +434,7 @@ add_subdirectory(timing)
# Build gtsam_unstable
if (GTSAM_BUILD_UNSTABLE)
add_subdirectory(gtsam_unstable)
endif(GTSAM_BUILD_UNSTABLE)
endif()
# Matlab toolbox
if (GTSAM_INSTALL_MATLAB_TOOLBOX)

View File

@ -20,6 +20,7 @@ install(FILES
GtsamPythonWrap.cmake
GtsamCythonWrap.cmake
GtsamTesting.cmake
GtsamPrinting.cmake
FindCython.cmake
FindNumPy.cmake
README.html

View File

@ -136,13 +136,13 @@ endif()
target_include_directories(gtsam BEFORE PUBLIC
# SuiteSparse_config
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/SuiteSparse_config>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/SuiteSparse_config>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/SuiteSparse_config>
# CCOLAMD
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/CCOLAMD/Include>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/gtsam/3rdparty/CCOLAMD>
$<INSTALL_INTERFACE:include/gtsam/3rdparty/CCOLAMD>
# main gtsam includes:
$<BUILD_INTERFACE:${CMAKE_SOURCE_DIR}>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_PREFIX}/include/>
$<INSTALL_INTERFACE:include/>
# config.h
$<BUILD_INTERFACE:${CMAKE_BINARY_DIR}>
# unit tests:

View File

@ -29,14 +29,22 @@
# undef max
# undef ERROR
#include <functional> // std::hash()
// Use TBB concurrent_unordered_map for ConcurrentMap
# define CONCURRENT_MAP_BASE tbb::concurrent_unordered_map<KEY, VALUE>
template <typename KEY, typename VALUE>
using ConcurrentMapBase = tbb::concurrent_unordered_map<
KEY,
VALUE,
std::hash<KEY>
>;
#else
// If we're not using TBB, use a FastMap for ConcurrentMap
#include <gtsam/base/FastMap.h>
# define CONCURRENT_MAP_BASE gtsam::FastMap<KEY, VALUE>
template <typename KEY, typename VALUE>
using ConcurrentMapBase = gtsam::FastMap<KEY, VALUE>;
#endif
@ -57,11 +65,11 @@ namespace gtsam {
* @addtogroup base
*/
template<typename KEY, typename VALUE>
class ConcurrentMap : public CONCURRENT_MAP_BASE {
class ConcurrentMap : public ConcurrentMapBase<KEY,VALUE> {
public:
typedef CONCURRENT_MAP_BASE Base;
typedef ConcurrentMapBase<KEY,VALUE> Base;
/** Default constructor */
ConcurrentMap() {}
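Replacing the CONCURRENT_MAP_BASE macro with the ConcurrentMapBase alias template keeps the backend choice (TBB concurrent_unordered_map vs. gtsam::FastMap) behind a real C++ name instead of a preprocessor token. A minimal hedged sketch of the same pattern, with std::map as a hypothetical stand-in for either backend:

#include <map>

// Hypothetical stand-in for the TBB / FastMap selection done in ConcurrentMap.h.
template <typename KEY, typename VALUE>
using DemoMapBase = std::map<KEY, VALUE>;

template <typename KEY, typename VALUE>
class DemoMap : public DemoMapBase<KEY, VALUE> {
 public:
  typedef DemoMapBase<KEY, VALUE> Base;  // same trick as ConcurrentMapBase<KEY, VALUE> above
  DemoMap() {}
};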

View File

@ -115,8 +115,8 @@ class DSFMap {
/// Small utility class for representing a wrappable pairs of ints.
class IndexPair : public std::pair<size_t,size_t> {
public:
IndexPair(): std::pair<size_t,size_t>(0,0) {}
IndexPair(size_t i, size_t j) : std::pair<size_t,size_t>(i,j) {}
inline IndexPair(): std::pair<size_t,size_t>(0,0) {}
inline IndexPair(size_t i, size_t j) : std::pair<size_t,size_t>(i,j) {}
inline size_t i() const { return first; };
inline size_t j() const { return second; };
};
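The IndexPair constructors are now marked inline in the header, matching the existing accessors; behaviour is unchanged. A small hedged usage sketch (assumes <gtsam/base/DSFMap.h> provides gtsam::IndexPair as declared above):

#include <gtsam/base/DSFMap.h>

int main() {
  gtsam::IndexPair p(2, 5);                // wrappable pair of ints
  return static_cast<int>(p.i() + p.j());  // 2 + 5 = 7
}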

View File

@ -89,12 +89,9 @@ public:
/**
* Create a duplicate object returned as a pointer to the generic Value interface.
* For the sake of performance, this function uses a singleton pool allocator instead of the normal heap allocator.
* The result must be deleted with Value::deallocate_, not with the 'delete' operator.
*/
virtual Value* clone_() const {
void *place = boost::singleton_pool<PoolTag, sizeof(GenericValue)>::malloc();
GenericValue* ptr = new (place) GenericValue(*this); // calls copy constructor to fill in
GenericValue* ptr = new GenericValue(*this); // calls copy constructor to fill in
return ptr;
}
@ -102,8 +99,7 @@ public:
* Destroy and deallocate this object, only if it was originally allocated using clone_().
*/
virtual void deallocate_() const {
this->~GenericValue(); // Virtual destructor cleans up the derived object
boost::singleton_pool<PoolTag, sizeof(GenericValue)>::free((void*) this); // Release memory from pool
delete this;
}
/**
@ -118,10 +114,7 @@ public:
// Call retract on the derived class using the retract trait function
const T retractResult = traits<T>::Retract(GenericValue<T>::value(), delta);
// Create a Value pointer copy of the result
void* resultAsValuePlace =
boost::singleton_pool<PoolTag, sizeof(GenericValue)>::malloc();
Value* resultAsValue = new (resultAsValuePlace) GenericValue(retractResult);
Value* resultAsValue = new GenericValue(retractResult);
// Return the pointer to the Value base class
return resultAsValue;
@ -172,12 +165,6 @@ public:
return *this;
}
private:
/// Fake Tag struct for singleton pool allocator. In fact, it is never used!
struct PoolTag {
};
private:
/** Serialization function */
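With the singleton pool gone, clone_() heap-allocates with plain new and deallocate_() simply calls delete this, so the PoolTag struct is no longer needed. A hedged sketch of the resulting ownership contract (someValue is a hypothetical GenericValue reference):

// clone_() returns a heap-allocated copy; deallocate_() releases it with delete.
gtsam::Value* copy = someValue.clone_();
// ... use the copy ...
copy->deallocate_();  // no boost::singleton_pool involved any more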

View File

@ -22,6 +22,7 @@
#include <gtsam/config.h> // for GTSAM_USE_TBB
#include <boost/optional/optional.hpp>
#include <gtsam/dllexport.h>
#include <string>
#include <typeinfo>
@ -117,7 +118,7 @@ public:
};
/// Thread-safe runtime error exception
class RuntimeErrorThreadsafe: public ThreadsafeException<RuntimeErrorThreadsafe> {
class GTSAM_EXPORT RuntimeErrorThreadsafe: public ThreadsafeException<RuntimeErrorThreadsafe> {
public:
/// Construct with a string describing the exception
RuntimeErrorThreadsafe(const std::string& description) :

View File

@ -196,7 +196,7 @@ namespace gtsam {
/**
* Small class that calls internal::tic at construction, and internal::toc when destroyed
*/
class AutoTicToc {
class GTSAM_EXPORT AutoTicToc {
private:
size_t id_;
const char* label_;

View File

@ -0,0 +1,424 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file NoiseModel.cpp
* @date Jan 13, 2010
* @author Richard Roberts
* @author Frank Dellaert
*/
#include <gtsam/linear/LossFunctions.h>
#include <iostream>
using namespace std;
namespace gtsam {
namespace noiseModel {
/* ************************************************************************* */
// M-Estimator
/* ************************************************************************* */
namespace mEstimator {
Vector Base::weight(const Vector& error) const {
const size_t n = error.rows();
Vector w(n);
for (size_t i = 0; i < n; ++i)
w(i) = weight(error(i));
return w;
}
// The following three functions re-weight block matrices and a vector
// according to their weight implementation
void Base::reweight(Vector& error) const {
if (reweight_ == Block) {
const double w = sqrtWeight(error.norm());
error *= w;
} else {
error.array() *= weight(error).cwiseSqrt().array();
}
}
// Reweight n block matrices with one error vector
void Base::reweight(vector<Matrix> &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
for(Matrix& Aj: A) {
Aj *= w;
}
error *= w;
}
else {
const Vector W = sqrtWeight(error);
for(Matrix& Aj: A) {
vector_scale_inplace(W,Aj);
}
error = W.cwiseProduct(error);
}
}
// Reweight one block matrix with one error vector
void Base::reweight(Matrix &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A);
error = W.cwiseProduct(error);
}
}
// Reweight two block matrix with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
error = W.cwiseProduct(error);
}
}
// Reweight three block matrix with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
A3 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
vector_scale_inplace(W,A3);
error = W.cwiseProduct(error);
}
}
/* ************************************************************************* */
// Null model
/* ************************************************************************* */
void Null::print(const std::string &s="") const
{ cout << s << "null ()" << endl; }
Null::shared_ptr Null::Create()
{ return shared_ptr(new Null()); }
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
if (c_ <= 0) {
throw runtime_error("mEstimator Fair takes only positive double in constructor.");
}
}
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log1p(normalizedError));
}
void Fair::print(const std::string &s="") const
{ cout << s << "fair (" << c_ << ")" << endl; }
bool Fair::equals(const Base &expected, double tol) const {
const Fair* p = dynamic_cast<const Fair*> (&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_ ) < tol;
}
Fair::shared_ptr Fair::Create(double c, const ReweightScheme reweight)
{ return shared_ptr(new Fair(c, reweight)); }
/* ************************************************************************* */
// Huber
/* ************************************************************************* */
Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator Huber takes only positive double in constructor.");
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
}
void Huber::print(const std::string &s="") const {
cout << s << "huber (" << k_ << ")" << endl;
}
bool Huber::equals(const Base &expected, double tol) const {
const Huber* p = dynamic_cast<const Huber*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
Huber::shared_ptr Huber::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Huber(c, reweight));
}
/* ************************************************************************* */
// Cauchy
/* ************************************************************************* */
Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), ksquared_(k * k) {
if (k <= 0) {
throw runtime_error("mEstimator Cauchy takes only positive double in constructor.");
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
const double val = std::log1p(error * error / ksquared_);
return ksquared_ * val * 0.5;
}
void Cauchy::print(const std::string &s="") const {
cout << s << "cauchy (" << k_ << ")" << endl;
}
bool Cauchy::equals(const Base &expected, double tol) const {
const Cauchy* p = dynamic_cast<const Cauchy*>(&expected);
if (p == NULL) return false;
return std::abs(ksquared_ - p->ksquared_) < tol;
}
Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Cauchy(c, reweight));
}
/* ************************************************************************* */
// Tukey
/* ************************************************************************* */
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
if (c <= 0) {
throw runtime_error("mEstimator Tukey takes only positive double in constructor.");
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double one_minus_xc2 = 1.0 - error*error/csquared_;
return one_minus_xc2 * one_minus_xc2;
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double one_minus_xc2 = 1.0 - error*error/csquared_;
const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2;
return csquared_ * (1 - t) / 6.0;
} else {
return csquared_ / 6.0;
}
}
void Tukey::print(const std::string &s="") const {
std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
}
bool Tukey::equals(const Base &expected, double tol) const {
const Tukey* p = dynamic_cast<const Tukey*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Tukey(c, reweight));
}
/* ************************************************************************* */
// Welsch
/* ************************************************************************* */
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * -std::expm1(-xc2);
}
void Welsch::print(const std::string &s="") const {
std::cout << s << ": Welsch (" << c_ << ")" << std::endl;
}
bool Welsch::equals(const Base &expected, double tol) const {
const Welsch* p = dynamic_cast<const Welsch*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsch(c, reweight));
}
/* ************************************************************************* */
// GemanMcClure
/* ************************************************************************* */
GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double GemanMcClure::weight(double error) const {
const double c2 = c_*c_;
const double c4 = c2*c2;
const double c2error = c2 + error*error;
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
}
void GemanMcClure::print(const std::string &s="") const {
std::cout << s << ": Geman-McClure (" << c_ << ")" << std::endl;
}
bool GemanMcClure::equals(const Base &expected, double tol) const {
const GemanMcClure* p = dynamic_cast<const GemanMcClure*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
GemanMcClure::shared_ptr GemanMcClure::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new GemanMcClure(c, reweight));
}
/* ************************************************************************* */
// DCS
/* ************************************************************************* */
DCS::DCS(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double DCS::weight(double error) const {
const double e2 = error*error;
if (e2 > c_)
{
const double w = 2.0*c_/(c_ + e2);
return w*w;
}
return 1.0;
}
double DCS::residual(double error) const {
// This is the simplified version of Eq 9 from (Agarwal13icra)
// after you simplify and cancel terms.
const double e2 = error*error;
const double e4 = e2*e2;
const double c2 = c_*c_;
return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_));
}
void DCS::print(const std::string &s="") const {
std::cout << s << ": DCS (" << c_ << ")" << std::endl;
}
bool DCS::equals(const Base &expected, double tol) const {
const DCS* p = dynamic_cast<const DCS*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new DCS(c, reweight));
}
/* ************************************************************************* */
// L2WithDeadZone
/* ************************************************************************* */
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
: Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
}
}
double L2WithDeadZone::weight(double error) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
void L2WithDeadZone::print(const std::string &s="") const {
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
}
bool L2WithDeadZone::equals(const Base &expected, double tol) const {
const L2WithDeadZone* p = dynamic_cast<const L2WithDeadZone*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme reweight) {
return shared_ptr(new L2WithDeadZone(k, reweight));
}
} // namespace mEstimator
} // namespace noiseModel
} // namespace gtsam
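To make the Scalar-vs-Block distinction in the reweight overloads above concrete, here is a hedged sketch (assumes the gtsam::Matrix/gtsam::Vector Eigen typedefs and the Huber estimator declared in LossFunctions.h):

#include <gtsam/linear/LossFunctions.h>

int main() {
  using gtsam::noiseModel::mEstimator::Huber;
  // Block (the default): one weight computed from the norm of the whole error vector.
  Huber::shared_ptr block = Huber::Create(1.345);
  // Scalar: every row is weighted by its own error element.
  Huber::shared_ptr scalar = Huber::Create(1.345, Huber::Scalar);

  gtsam::Matrix A = gtsam::Matrix::Identity(3, 3);
  gtsam::Vector b = (gtsam::Vector(3) << 0.5, 2.0, -3.0).finished();
  block->reweight(A, b);   // scales A and b in place by sqrtWeight(||b||)
  scalar->reweight(A, b);  // scales each row by sqrtWeight(|b(i)|)
  return 0;
}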

View File

@ -0,0 +1,379 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file NoiseModel.h
* @date Jan 13, 2010
* @author Richard Roberts
* @author Frank Dellaert
*/
#pragma once
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Testable.h>
#include <gtsam/dllexport.h>
#include <boost/serialization/extended_type_info.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/optional.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/singleton.hpp>
namespace gtsam {
namespace noiseModel {
// clang-format off
/**
* The mEstimator name space contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name Symbol Least-Squares L1-norm Huber
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Derivative \phi(x) x sgn(x) x if |x|<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if |x|<k, k/|x| otherwise
*
* With these definitions, D(\rho(x), p) = \phi(x) D(x,p) = w(x) x D(x,p) = w(x) D(L2(x), p),
* and hence we can solve the equivalent weighted least squares problem \sum w(r_i) \rho(r_i)
*
* Each M-estimator in the mEstimator name space simply implements the above functions.
*/
// clang-format on
namespace mEstimator {
//---------------------------------------------------------------------------------------
class GTSAM_EXPORT Base {
public:
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block) : reweight_(reweight) {}
virtual ~Base() {}
/*
* This method is responsible for returning the total penalty for a given
* amount of error. For example, this method is responsible for implementing
* the quadratic function for an L2 penalty, the absolute value function for
* an L1 penalty, etc.
*
* TODO(mikebosse): When the residual function takes the norm of the residual
* vector as input, it prevents implementations of asymmetric loss functions.
* It would be better for this function to accept the vector and internally
* call the norm if necessary.
*/
virtual double residual(double error) const { return 0; };
/*
* This method is responsible for returning the weight function for a given
* amount of error. The weight function is related to the analytic derivative
* of the residual function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with
* robust penalties using iteratively re-weighted least squares.
*/
virtual double weight(double error) const = 0;
virtual void print(const std::string &s) const = 0;
virtual bool equals(const Base &expected, double tol = 1e-8) const = 0;
double sqrtWeight(double error) const { return std::sqrt(weight(error)); }
/** produce a weight vector according to an error vector and the implemented
* robust function */
Vector weight(const Vector &error) const;
/** square root version of the weight function */
Vector sqrtWeight(const Vector &error) const {
return weight(error).cwiseSqrt();
}
/** reweight block matrices and a vector according to their weight
* implementation */
void reweight(Vector &error) const;
void reweight(std::vector<Matrix> &A, Vector &error) const;
void reweight(Matrix &A, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const;
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_NVP(reweight_);
}
};
/// The Null loss is not robust at all; it leaves the model Gaussian (weight is always 1).
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base & /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create();
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
};
/// Fair implements the "Fair" robust error model (Zhang97ivc)
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
public:
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// Huber implements the "Huber" robust error model (Zhang97ivc)
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
}
};
/// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed
/// by:
/// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
/// Information Technology, Karlsruhe, Germany.
/// oberlaender@fzi.de
/// Thanks Jan!
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
public:
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
}
};
/// Tukey implements the "Tukey" robust error model (Zhang97ivc)
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// Welsch implements the "Welsch" robust error model (Zhang97ivc)
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
// Welsh implements the "Welsch" robust error model (Zhang97ivc)
// This was misspelled in previous versions of gtsam and should be
// removed in the future.
using Welsh = Welsch;
#endif
/// GemanMcClure implements the "Geman-McClure" robust error model
/// (Zhang97ivc).
///
/// Note that the original Geman-McClure weight function uses the parameter c == 1.0,
/// but different values are allowed here, so this is actually the generalized
/// Geman-McClure from (Agarwal15phd).
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// DCS implements the Dynamic Covariance Scaling robust error model
/// from the paper Robust Map Optimization (Agarwal13icra).
///
/// Under the special condition of the parameter c == 1.0 and not
/// forcing the output weight s <= 1.0, DCS is similar to Geman-McClure.
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
DCS(double c = 1.0, const ReweightScheme reweight = Block);
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(c_);
}
};
/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of
/// width 2*k, centered at the origin. The resulting penalty within the dead
/// zone is always zero, and grows quadratically outside the dead zone. In this
/// sense, the L2WithDeadZone penalty is "robust to inliers", rather than being
/// robust to outliers. This penalty can be used to create barrier functions in
/// a general way.
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base &expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar &BOOST_SERIALIZATION_NVP(k_);
}
};
} // namespace mEstimator
} // namespace noiseModel
} // namespace gtsam
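As a quick numeric check of the residual/weight table in the comment near the top of this header (hedged; uses the default Huber threshold k = 1.345 and an L2WithDeadZone with k = 1.0):

#include <gtsam/linear/LossFunctions.h>
#include <cassert>
#include <cmath>

int main() {
  gtsam::noiseModel::mEstimator::Huber huber(1.345);
  assert(huber.weight(1.0) == 1.0);                           // |x| <= k: weight 1
  assert(std::abs(huber.weight(2.0) - 1.345 / 2.0) < 1e-12);  // |x| >  k: k/|x|
  assert(std::abs(huber.residual(2.0)
                  - 1.345 * (2.0 - 1.345 / 2.0)) < 1e-12);    // k*(|x| - k/2)

  gtsam::noiseModel::mEstimator::L2WithDeadZone dz(1.0);
  assert(dz.residual(0.5) == 0.0);                            // inside the dead zone
  assert(std::abs(dz.residual(2.0) - 0.5) < 1e-12);           // 0.5*(k-|x|)^2 = 0.5
  return 0;
}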

View File

@ -616,403 +616,6 @@ void Unit::print(const std::string& name) const {
cout << name << "unit (" << dim_ << ") " << endl;
}
/* ************************************************************************* */
// M-Estimator
/* ************************************************************************* */
namespace mEstimator {
Vector Base::weight(const Vector& error) const {
const size_t n = error.rows();
Vector w(n);
for (size_t i = 0; i < n; ++i)
w(i) = weight(error(i));
return w;
}
// The following three functions re-weight block matrices and a vector
// according to their weight implementation
void Base::reweight(Vector& error) const {
if (reweight_ == Block) {
const double w = sqrtWeight(error.norm());
error *= w;
} else {
error.array() *= weight(error).cwiseSqrt().array();
}
}
// Reweight n block matrices with one error vector
void Base::reweight(vector<Matrix> &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
for(Matrix& Aj: A) {
Aj *= w;
}
error *= w;
}
else {
const Vector W = sqrtWeight(error);
for(Matrix& Aj: A) {
vector_scale_inplace(W,Aj);
}
error = W.cwiseProduct(error);
}
}
// Reweight one block matrix with one error vector
void Base::reweight(Matrix &A, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A);
error = W.cwiseProduct(error);
}
}
// Reweight two block matrix with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
error = W.cwiseProduct(error);
}
}
// Reweight three block matrix with one error vector
void Base::reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const {
if ( reweight_ == Block ) {
const double w = sqrtWeight(error.norm());
A1 *= w;
A2 *= w;
A3 *= w;
error *= w;
}
else {
const Vector W = sqrtWeight(error);
vector_scale_inplace(W,A1);
vector_scale_inplace(W,A2);
vector_scale_inplace(W,A3);
error = W.cwiseProduct(error);
}
}
/* ************************************************************************* */
// Null model
/* ************************************************************************* */
void Null::print(const std::string &s="") const
{ cout << s << "null ()" << endl; }
Null::shared_ptr Null::Create()
{ return shared_ptr(new Null()); }
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
if (c_ <= 0) {
throw runtime_error("mEstimator Fair takes only positive double in constructor.");
}
}
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log(1 + normalizedError));
}
void Fair::print(const std::string &s="") const
{ cout << s << "fair (" << c_ << ")" << endl; }
bool Fair::equals(const Base &expected, double tol) const {
const Fair* p = dynamic_cast<const Fair*> (&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_ ) < tol;
}
Fair::shared_ptr Fair::Create(double c, const ReweightScheme reweight)
{ return shared_ptr(new Fair(c, reweight)); }
/* ************************************************************************* */
// Huber
/* ************************************************************************* */
Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator Huber takes only positive double in constructor.");
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
}
void Huber::print(const std::string &s="") const {
cout << s << "huber (" << k_ << ")" << endl;
}
bool Huber::equals(const Base &expected, double tol) const {
const Huber* p = dynamic_cast<const Huber*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
Huber::shared_ptr Huber::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Huber(c, reweight));
}
/* ************************************************************************* */
// Cauchy
/* ************************************************************************* */
Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), ksquared_(k * k) {
if (k <= 0) {
throw runtime_error("mEstimator Cauchy takes only positive double in constructor.");
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
const double xc2 = error / k_;
const double val = std::log(1 + (xc2*xc2));
return ksquared_ * val * 0.5;
}
void Cauchy::print(const std::string &s="") const {
cout << s << "cauchy (" << k_ << ")" << endl;
}
bool Cauchy::equals(const Base &expected, double tol) const {
const Cauchy* p = dynamic_cast<const Cauchy*>(&expected);
if (p == NULL) return false;
return std::abs(ksquared_ - p->ksquared_) < tol;
}
Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Cauchy(c, reweight));
}
/* ************************************************************************* */
// Tukey
/* ************************************************************************* */
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
if (c <= 0) {
throw runtime_error("mEstimator Tukey takes only positive double in constructor.");
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double xc2 = error*error/csquared_;
const double t = (1 - xc2)*(1 - xc2)*(1 - xc2);
return csquared_ * (1 - t) / 6.0;
} else {
return csquared_ / 6.0;
}
}
void Tukey::print(const std::string &s="") const {
std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
}
bool Tukey::equals(const Base &expected, double tol) const {
const Tukey* p = dynamic_cast<const Tukey*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Tukey(c, reweight));
}
/* ************************************************************************* */
// Welsch
/* ************************************************************************* */
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * (1 - std::exp(-xc2) );
}
void Welsch::print(const std::string &s="") const {
std::cout << s << ": Welsch (" << c_ << ")" << std::endl;
}
bool Welsch::equals(const Base &expected, double tol) const {
const Welsch* p = dynamic_cast<const Welsch*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsch(c, reweight));
}
/* ************************************************************************* */
// GemanMcClure
/* ************************************************************************* */
GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double GemanMcClure::weight(double error) const {
const double c2 = c_*c_;
const double c4 = c2*c2;
const double c2error = c2 + error*error;
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
}
void GemanMcClure::print(const std::string &s="") const {
std::cout << s << ": Geman-McClure (" << c_ << ")" << std::endl;
}
bool GemanMcClure::equals(const Base &expected, double tol) const {
const GemanMcClure* p = dynamic_cast<const GemanMcClure*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
GemanMcClure::shared_ptr GemanMcClure::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new GemanMcClure(c, reweight));
}
/* ************************************************************************* */
// DCS
/* ************************************************************************* */
DCS::DCS(double c, const ReweightScheme reweight)
: Base(reweight), c_(c) {
}
double DCS::weight(double error) const {
const double e2 = error*error;
if (e2 > c_)
{
const double w = 2.0*c_/(c_ + e2);
return w*w;
}
return 1.0;
}
double DCS::residual(double error) const {
// This is the simplified version of Eq 9 from (Agarwal13icra)
// after you simplify and cancel terms.
const double e2 = error*error;
const double e4 = e2*e2;
const double c2 = c_*c_;
return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_));
}
void DCS::print(const std::string &s="") const {
std::cout << s << ": DCS (" << c_ << ")" << std::endl;
}
bool DCS::equals(const Base &expected, double tol) const {
const DCS* p = dynamic_cast<const DCS*>(&expected);
if (p == NULL) return false;
return std::abs(c_ - p->c_) < tol;
}
DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new DCS(c, reweight));
}
/* ************************************************************************* */
// L2WithDeadZone
/* ************************************************************************* */
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
: Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
}
}
double L2WithDeadZone::weight(double error) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
void L2WithDeadZone::print(const std::string &s="") const {
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
}
bool L2WithDeadZone::equals(const Base &expected, double tol) const {
const L2WithDeadZone* p = dynamic_cast<const L2WithDeadZone*>(&expected);
if (p == NULL) return false;
return std::abs(k_ - p->k_) < tol;
}
L2WithDeadZone::shared_ptr L2WithDeadZone::Create(double k, const ReweightScheme reweight) {
return shared_ptr(new L2WithDeadZone(k, reweight));
}
} // namespace mEstimator
/* ************************************************************************* */
// Robust
/* ************************************************************************* */

View File

@ -21,6 +21,7 @@
#include <gtsam/base/Testable.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/dllexport.h>
#include <gtsam/linear/LossFunctions.h>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/extended_type_info.hpp>
@ -39,6 +40,7 @@ namespace gtsam {
class Constrained;
class Isotropic;
class Unit;
class RobustModel;
//---------------------------------------------------------------------------------------
@ -115,6 +117,14 @@ namespace gtsam {
v = unwhiten(v);
}
/** Useful function for robust noise models to get the unweighted but whitened error */
virtual Vector unweightedWhiten(const Vector& v) const {
return whiten(v);
}
/** get the weight from the effective loss function on residual vector v */
virtual double weight(const Vector& v) const { return 1.0; }
private:
/** Serialization function */
friend class boost::serialization::access;
@ -626,350 +636,6 @@ namespace gtsam {
}
};
/**
* The mEstimator name space contains all robust error functions.
* It mirrors the exposition at
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice.
*
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name Symbol Least-Squares L1-norm Huber
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Derivative \phi(x) x sgn(x) x if |x|<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if |x|<k, k/|x| otherwise
*
* With these definitions, D(\rho(x), p) = \phi(x) D(x,p) = w(x) x D(x,p) = w(x) D(L2(x), p),
* and hence we can solve the equivalent weighted least squares problem \sum w(r_i) \rho(r_i)
*
* Each M-estimator in the mEstimator name space simply implements the above functions.
*/
namespace mEstimator {
//---------------------------------------------------------------------------------------
class GTSAM_EXPORT Base {
public:
enum ReweightScheme { Scalar, Block };
typedef boost::shared_ptr<Base> shared_ptr;
protected:
/** the rows can be weighted independently according to the error
* or uniformly with the norm of the right hand side */
ReweightScheme reweight_;
public:
Base(const ReweightScheme reweight = Block):reweight_(reweight) {}
virtual ~Base() {}
/*
* This method is responsible for returning the total penalty for a given amount of error.
* For example, this method is responsible for implementing the quadratic function for an
* L2 penalty, the absolute value function for an L1 penalty, etc.
*
* TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
* implement a residual function, and GTSAM calls the weight function to evaluate the
* total penalty, rather than calling the residual function. The weight function should be
* used during iteratively reweighted least squares optimization, but should not be used to
* evaluate the total penalty. The long-term solution is for all mEstimators to implement
* both a weight and a residual function, and for GTSAM to call the residual function when
* evaluating the total penalty. But for now, I'm leaving this residual method as pure
* virtual, so the existing mEstimators can inherit this default fallback behavior.
*/
virtual double residual(double error) const { return 0; };
/*
* This method is responsible for returning the weight function for a given amount of error.
* The weight function is related to the analytic derivative of the residual function. See
* https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf
* for details. This method is required when optimizing cost functions with robust penalties
* using iteratively re-weighted least squares.
*/
virtual double weight(double error) const = 0;
virtual void print(const std::string &s) const = 0;
virtual bool equals(const Base& expected, double tol=1e-8) const = 0;
double sqrtWeight(double error) const {
return std::sqrt(weight(error));
}
/** produce a weight vector according to an error vector and the implemented
* robust function */
Vector weight(const Vector &error) const;
/** square root version of the weight function */
Vector sqrtWeight(const Vector &error) const {
return weight(error).cwiseSqrt();
}
/** reweight block matrices and a vector according to their weight implementation */
void reweight(Vector &error) const;
void reweight(std::vector<Matrix> &A, Vector &error) const;
void reweight(Matrix &A, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Vector &error) const;
void reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_NVP(reweight_);
}
};
/// Null class is not robust so is a Gaussian ?
class GTSAM_EXPORT Null : public Base {
public:
typedef boost::shared_ptr<Null> shared_ptr;
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base& /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create() ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
};
/// Fair implements the "Fair" robust error model (Zhang97ivc)
class GTSAM_EXPORT Fair : public Base {
protected:
double c_;
public:
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
/// Huber implements the "Huber" robust error model (Zhang97ivc)
class GTSAM_EXPORT Huber : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(k_);
}
};
/// Cauchy implements the "Cauchy" robust error model (Lee2013IROS). Contributed by:
/// Dipl.-Inform. Jan Oberlaender (M.Sc.), FZI Research Center for
/// Information Technology, Karlsruhe, Germany.
/// oberlaender@fzi.de
/// Thanks Jan!
class GTSAM_EXPORT Cauchy : public Base {
protected:
double k_, ksquared_;
public:
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(k_);
}
};
/// Tukey implements the "Tukey" robust error model (Zhang97ivc)
class GTSAM_EXPORT Tukey : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
/// Welsch implements the "Welsch" robust error model (Zhang97ivc)
class GTSAM_EXPORT Welsch : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
// Welsh implements the "Welsch" robust error model (Zhang97ivc)
// This was misspelled in previous versions of gtsam and should be
// removed in the future.
using Welsh = Welsch;
#endif
/// GemanMcClure implements the "Geman-McClure" robust error model
/// (Zhang97ivc).
///
/// Note that the Geman-McClure weight function uses the parameter c == 1.0,
/// but different values are allowed here, so this is actually the
/// generalized Geman-McClure from (Agarwal15phd).
class GTSAM_EXPORT GemanMcClure : public Base {
public:
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol=1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
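// Illustrative sketch (not part of this header): the generalized parameterization
// mentioned above, i.e. Geman-McClure with c != 1.0, wrapped in a robust noise model.
// The error dimension (3) and the sigma/c arguments are hypothetical placeholders.
#include <gtsam/linear/NoiseModel.h>

gtsam::SharedNoiseModel generalizedGemanMcClure(double sigma, double c /* e.g. 1.5 */) {
  using namespace gtsam::noiseModel;
  return Robust::Create(mEstimator::GemanMcClure::Create(c), Isotropic::Sigma(3, sigma));
}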
/// DCS implements the Dynamic Covariance Scaling robust error model
/// from the paper Robust Map Optimization (Agarwal13icra).
///
/// In the special case of the parameter c == 1.0, and without forcing the
/// output weight s <= 1.0, DCS behaves similarly to Geman-McClure.
class GTSAM_EXPORT DCS : public Base {
public:
typedef boost::shared_ptr<DCS> shared_ptr;
DCS(double c = 1.0, const ReweightScheme reweight = Block);
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
double c_;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
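// Illustrative sketch (not part of this header): probing the relationship stated above
// by comparing DCS and Geman-McClure weights at the special value c == 1.0.
#include <cstdio>
#include <gtsam/linear/NoiseModel.h>

void compareDcsAndGemanMcClure() {
  using namespace gtsam::noiseModel::mEstimator;
  auto dcs = DCS::Create(1.0);
  auto gm = GemanMcClure::Create(1.0);
  for (double e : {0.5, 1.0, 2.0, 5.0})
    std::printf("|e| = %.1f   DCS = %.4f   GM = %.4f\n", e, dcs->weight(e), gm->weight(e));
}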
/// L2WithDeadZone implements a standard L2 penalty, but with a dead zone of width 2*k,
/// centered at the origin. The resulting penalty within the dead zone is always zero, and
/// grows quadratically outside the dead zone. In this sense, the L2WithDeadZone penalty is
/// "robust to inliers", rather than being robust to outliers. This penalty can be used to
/// create barrier functions in a general way.
class GTSAM_EXPORT L2WithDeadZone : public Base {
protected:
double k_;
public:
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(k_);
}
};
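// Illustrative sketch (not part of this header): probing the dead zone described above.
// Per the comment, residual() should vanish for |e| < k and grow quadratically outside.
#include <cstdio>
#include <gtsam/linear/NoiseModel.h>

void probeDeadZone() {
  auto dz = gtsam::noiseModel::mEstimator::L2WithDeadZone::Create(1.0);  // k = 1.0
  for (double e : {0.0, 0.5, 0.99, 1.5, 3.0})
    std::printf("|e| = %.2f   residual = %.4f\n", e, dz->residual(e));
}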
} ///\namespace mEstimator
/**
* Base class for robust error models
* The robust M-estimators above simply tell us how to re-weight the residual, and are
@ -1026,9 +692,9 @@ namespace gtsam {
{ Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; }
inline virtual Vector unwhiten(const Vector& /*v*/) const
{ throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }
// TODO(mike): fold the use of the m-estimator residual(...) function into distance(...)
// Fold the use of the m-estimator residual(...) function into distance(...)
inline virtual double distance(const Vector& v) const
{ return this->whiten(v).squaredNorm(); }
{ return robust_->residual(this->unweightedWhiten(v).norm()); }
inline virtual double distance_non_whitened(const Vector& v) const
{ return robust_->residual(v.norm()); }
// TODO: these are really robust iterated re-weighting support functions
@ -1038,6 +704,14 @@ namespace gtsam {
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const;
virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const;
virtual Vector unweightedWhiten(const Vector& v) const {
return noise_->unweightedWhiten(v);
}
virtual double weight(const Vector& v) const {
// Todo(mikebosse): make the robust weight function input a vector.
return robust_->weight(v.norm());
}
static shared_ptr Create(
const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise);

View File

@ -93,6 +93,28 @@ Vector NoiseModelFactor::whitenedError(const Values& c) const {
return noiseModel_ ? noiseModel_->whiten(b) : b;
}
/* ************************************************************************* */
Vector NoiseModelFactor::unweightedWhitenedError(const Values& c) const {
const Vector b = unwhitenedError(c);
check(noiseModel_, b.size());
return noiseModel_ ? noiseModel_->unweightedWhiten(b) : b;
}
/* ************************************************************************* */
double NoiseModelFactor::weight(const Values& c) const {
if (active(c)) {
if (noiseModel_) {
const Vector b = unwhitenedError(c);
check(noiseModel_, b.size());
return 0.5 * noiseModel_->weight(b);
}
else
return 1.0;
} else {
return 0.0;
}
}
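// Illustrative sketch (not part of this file): the new weight() accessor makes it easy
// to inspect how strongly each robust factor is down-weighted at a given estimate.
// The graph/result arguments are hypothetical placeholders.
#include <iostream>
#include <boost/shared_ptr.hpp>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

void printFactorWeights(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& result) {
  for (const auto& factor : graph) {
    auto nf = boost::dynamic_pointer_cast<gtsam::NoiseModelFactor>(factor);
    if (nf) std::cout << nf->weight(result) << std::endl;  // effective weight from the noise model
  }
}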
/* ************************************************************************* */
double NoiseModelFactor::error(const Values& c) const {
if (active(c)) {

View File

@ -226,6 +226,16 @@ public:
*/
Vector whitenedError(const Values& c) const;
/**
* Vector of errors, whitened, but unweighted by any loss function
*/
Vector unweightedWhitenedError(const Values& c) const;
/**
* Compute the effective weight of the factor from the noise model.
*/
double weight(const Values& c) const;
/**
* Calculate the error of the factor.
* This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of Gaussian.

View File

@ -31,7 +31,7 @@ namespace gtsam {
/** The common parameters for Nonlinear optimizers. Most optimizers
* deriving from NonlinearOptimizer also subclass the parameters.
*/
class NonlinearOptimizerParams {
class GTSAM_EXPORT NonlinearOptimizerParams {
public:
/** See NonlinearOptimizerParams::verbosity */
enum Verbosity {
@ -52,7 +52,7 @@ public:
virtual ~NonlinearOptimizerParams() {
}
GTSAM_EXPORT virtual void print(const std::string& str = "") const;
virtual void print(const std::string& str = "") const;
size_t getMaxIterations() const { return maxIterations; }
double getRelativeErrorTol() const { return relativeErrorTol; }
@ -68,8 +68,8 @@ public:
verbosity = verbosityTranslator(src);
}
GTSAM_EXPORT static Verbosity verbosityTranslator(const std::string &s) ;
GTSAM_EXPORT static std::string verbosityTranslator(Verbosity value) ;
static Verbosity verbosityTranslator(const std::string &s) ;
static std::string verbosityTranslator(Verbosity value) ;
/** See NonlinearOptimizerParams::linearSolverType */
enum LinearSolverType {
@ -144,10 +144,10 @@ public:
}
private:
GTSAM_EXPORT std::string linearSolverTranslator(LinearSolverType linearSolverType) const;
GTSAM_EXPORT LinearSolverType linearSolverTranslator(const std::string& linearSolverType) const;
GTSAM_EXPORT std::string orderingTypeTranslator(Ordering::OrderingType type) const;
GTSAM_EXPORT Ordering::OrderingType orderingTypeTranslator(const std::string& type) const;
std::string linearSolverTranslator(LinearSolverType linearSolverType) const;
LinearSolverType linearSolverTranslator(const std::string& linearSolverType) const;
std::string orderingTypeTranslator(Ordering::OrderingType type) const;
Ordering::OrderingType orderingTypeTranslator(const std::string& type) const;
};
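// Sketch of the pattern applied in this hunk (the class name below is hypothetical):
// annotate the class once with GTSAM_EXPORT instead of marking individual members, e.g.
//   class GTSAM_EXPORT Example {
//     void print(const std::string& s = "") const;   // exported via the class annotation
//   };
// rather than sprinkling GTSAM_EXPORT on each method of an unexported class.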
// For backward compatibility:

View File

@ -149,6 +149,8 @@ public:
ExecutionTraceStorage* traceStorage) const {
return constant_;
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
//-----------------------------------------------------------------------------

View File

@ -112,6 +112,8 @@ public:
}
return error;
}
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
} // namespace gtsam

View File

@ -30,7 +30,7 @@ namespace gtsam {
/* ************************************************************************* */
/// A clique in a SymbolicBayesTree
class SymbolicBayesTreeClique :
class GTSAM_EXPORT SymbolicBayesTreeClique :
public BayesTreeCliqueBase<SymbolicBayesTreeClique, SymbolicFactorGraph>
{
public:
@ -45,7 +45,7 @@ namespace gtsam {
/* ************************************************************************* */
/// A Bayes tree that represents the connectivity between variables but is not associated with any
/// probability functions.
class SymbolicBayesTree :
class GTSAM_EXPORT SymbolicBayesTree :
public BayesTree<SymbolicBayesTreeClique>
{
private:
@ -59,7 +59,7 @@ namespace gtsam {
SymbolicBayesTree() {}
/** check equality */
GTSAM_EXPORT bool equals(const This& other, double tol = 1e-9) const;
bool equals(const This& other, double tol = 1e-9) const;
private:
/** Serialization function */

View File

@ -12,6 +12,7 @@
#include <list>
#include <vector>
#include <stdexcept>
#include <string>
#include <boost/shared_ptr.hpp>
#include "PartitionWorkSpace.h"

View File

@ -1,13 +1,18 @@
<?xml version="1.0"?>
<package format="2">
<name>gtsam</name>
<version>4.0.0</version>
<version>4.0.2</version>
<description>gtsam</description>
<maintainer email="gtsam@lists.gatech.edu">Frank Dellaert</maintainer>
<license>BSD</license>
<buildtool_depend>cmake</buildtool_depend>
<build_depend>boost</build_depend>
<build_depend>eigen</build_depend>
<build_depend>tbb</build_depend>
<export>
<!-- Specify that this is not really a Catkin package-->
<build_type>cmake</build_type>