/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file NoiseModel.cpp
 * @date Jan 13, 2010
 * @author Richard Roberts
 * @author Frank Dellaert
 */

#include <gtsam/linear/NoiseModel.h>
#include <gtsam/base/timing.h>

#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>

#include <cmath>
#include <iostream>
#include <limits>
#include <list>
#include <stdexcept>
#include <typeinfo>
#include <vector>

static double inf = std::numeric_limits<double>::infinity();

using namespace std;

namespace gtsam {
namespace noiseModel {

/* ************************************************************************* */
// update A, b
// A' \define A_{S}-ar and b'\define b-ad
// Linear algebra: takes away projection on latest orthogonal
// Graph: make a new factor on the separator S
// __attribute__ ((noinline))  // uncomment to prevent inlining when profiling
template<class MATRIX>
void updateAb(MATRIX& Ab, int j, const Vector& a, const Vector& rd) {
  size_t n = Ab.cols()-1;
  Ab.middleCols(j+1,n-j) -= a * rd.segment(j+1, n-j).transpose();
}

/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Covariance(const Matrix& covariance, bool smart) {
  size_t m = covariance.rows(), n = covariance.cols();
  if (m != n)
    throw invalid_argument("Gaussian::Covariance: covariance not square");
  if (smart) {
    // check all non-diagonal entries
    size_t i, j;
    for (i = 0; i < m; i++)
      for (j = 0; j < n; j++)
        if (i != j && fabs(covariance(i, j)) > 1e-9) goto full;
    Vector variances(n);
    for (j = 0; j < n; j++) variances(j) = covariance(j,j);
    return Diagonal::Variances(variances, true);
  }
  full:
  return shared_ptr(new Gaussian(n, inverse_square_root(covariance)));
}

/* ************************************************************************* */
void Gaussian::print(const string& name) const {
  gtsam::print(thisR(), name + "Gaussian");
}

/* ************************************************************************* */
bool Gaussian::equals(const Base& expected, double tol) const {
  const Gaussian* p = dynamic_cast<const Gaussian*>(&expected);
  if (p == NULL) return false;
  if (typeid(*this) != typeid(*p)) return false;
  //if (!sqrt_information_) return true; // ALEX todo;
  return equal_with_abs_tol(R(), p->R(), sqrt(tol));
}

/* ************************************************************************* */
Vector Gaussian::whiten(const Vector& v) const {
  return thisR() * v;
}

/* ************************************************************************* */
Vector Gaussian::unwhiten(const Vector& v) const {
  return backSubstituteUpper(thisR(), v);
}

/* ************************************************************************* */
double Gaussian::Mahalanobis(const Vector& v) const {
  // Note: for Diagonal, which does ediv_, will be correct for constraints
  Vector w = whiten(v);
  return w.dot(w);
}

/* ************************************************************************* */
Matrix Gaussian::Whiten(const Matrix& H) const {
  return thisR() * H;
}

/* ************************************************************************* */
void Gaussian::WhitenInPlace(Matrix& H) const {
  H = thisR() * H;
}

/* ************************************************************************* */
void Gaussian::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H = thisR() * H;
}
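/* Illustrative usage sketch (not part of the library build): constructing a
 * full-covariance Gaussian noise model and whitening a residual with it. The
 * `smart` flag above downgrades a numerically diagonal covariance to a
 * Diagonal model. Values below are made up for illustration.
 *
 *   Matrix cov = (Matrix(2, 2) << 0.25, 0.01, 0.01, 0.25).finished();
 *   Gaussian::shared_ptr model = Gaussian::Covariance(cov, true);
 *   Vector e = (Vector(2) << 0.5, -0.2).finished();
 *   Vector w = model->whiten(e);       // R * e, in unit-covariance units
 *   double m = model->Mahalanobis(e);  // equals w.dot(w)
 */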
/* ************************************************************************* */
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QR(Matrix& Ab) const {

  gttic(Gaussian_noise_model_QR);

  static const bool debug = false;

  // get size(A) and maxRank
  // TODO: really no rank problems ?
  // size_t m = Ab.rows(), n = Ab.cols()-1;
  // size_t maxRank = min(m,n);

  // pre-whiten everything (cheaply if possible)
  WhitenInPlace(Ab);

  if (debug) gtsam::print(Ab, "Whitened Ab: ");

  // Eigen QR - much faster than older householder approach
  inplace_QR(Ab);

  // hand-coded householder implementation
  // TODO: necessary to isolate last column?
  // householder(Ab, maxRank);

  return SharedDiagonal();
}

void Gaussian::WhitenSystem(vector<Matrix>& A, Vector& b) const {
  BOOST_FOREACH(Matrix& Aj, A) { WhitenInPlace(Aj); }
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A, Vector& b) const {
  WhitenInPlace(A);
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const {
  WhitenInPlace(A1);
  WhitenInPlace(A2);
  whitenInPlace(b);
}

void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{
  WhitenInPlace(A1);
  WhitenInPlace(A2);
  WhitenInPlace(A3);
  whitenInPlace(b);
}

/* ************************************************************************* */
// Diagonal
/* ************************************************************************* */
Diagonal::Diagonal() :
    Gaussian(1) //, sigmas_(ones(1)), invsigmas_(ones(1)), precisions_(ones(1))
{
}

/* ************************************************************************* */
Diagonal::Diagonal(const Vector& sigmas) :
    Gaussian(sigmas.size()), sigmas_(sigmas), invsigmas_(reciprocal(sigmas)),
    precisions_(emul(invsigmas_, invsigmas_)) {
}

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Variances(const Vector& variances, bool smart) {
  if (smart) {
    // check whether all the same entry
    DenseIndex j, n = variances.size();
    for (j = 1; j < n; j++)
      if (variances(j) != variances(0)) goto full;
    return Isotropic::Variance(n, variances(0), true);
  }
  full:
  return shared_ptr(new Diagonal(esqrt(variances)));
}

/* ************************************************************************* */
Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) {
  if (smart) {
    // look for zeros to make a constraint
    for (size_t i = 0; i < (size_t) sigmas.size(); ++i)
      if (sigmas(i) < 1e-8)
        return Constrained::MixedSigmas(sigmas);
  }
  return Diagonal::shared_ptr(new Diagonal(sigmas));
}

/* ************************************************************************* */
void Diagonal::print(const string& name) const {
  gtsam::print(sigmas_, name + "diagonal sigmas");
}

/* ************************************************************************* */
Vector Diagonal::whiten(const Vector& v) const {
  return emul(v, invsigmas());
}

/* ************************************************************************* */
Vector Diagonal::unwhiten(const Vector& v) const {
  return emul(v, sigmas_);
}

/* ************************************************************************* */
Matrix Diagonal::Whiten(const Matrix& H) const {
  return vector_scale(invsigmas(), H);
}

/* ************************************************************************* */
void Diagonal::WhitenInPlace(Matrix& H) const {
  vector_scale_inplace(invsigmas(), H);
}

/* ************************************************************************* */
void Diagonal::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H = invsigmas().asDiagonal() * H;
}
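/* Illustrative usage sketch (not part of the library build): the Diagonal
 * factories above pick the cheapest model that fits the input. With smart
 * checking enabled, Sigmas() returns a Constrained model when any sigma is
 * near zero, and Variances() collapses to an Isotropic model when all
 * variances are equal. Numbers below are made up for illustration.
 *
 *   SharedDiagonal d1 = Diagonal::Sigmas((Vector(3) << 0.1, 0.2, 0.3).finished(), true);
 *   SharedDiagonal d2 = Diagonal::Sigmas((Vector(3) << 0.1, 0.0, 0.3).finished(), true);      // Constrained
 *   SharedDiagonal d3 = Diagonal::Variances((Vector(3) << 4.0, 4.0, 4.0).finished(), true);   // Isotropic
 */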
/* ************************************************************************* */
// Constrained
/* ************************************************************************* */

/* ************************************************************************* */
Constrained::Constrained(const Vector& sigmas) :
    Diagonal(sigmas), mu_(repeat(sigmas.size(), 1000.0)) {
}

/* ************************************************************************* */
Vector Constrained::whiten(const Vector& v) const {
  // Divide by sigma where it is nonzero; a zero sigma marks a hard-constrained
  // dimension, whose entry is passed through unchanged.
  // Assumes a.size() >= b.size()
  const Vector& a = v;
  const Vector& b = sigmas_;
  Vector c = a;
  for( DenseIndex i = 0; i < b.size(); i++ ) {
    const double& ai = a(i), &bi = b(i);
    if (bi!=0) c(i) = ai/bi;
  }
  return c;
}

/* ************************************************************************* */
double Constrained::distance(const Vector& v) const {
  Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements
  // TODO Find a better way of doing these checks
  for (size_t i = 0; i < dim_; ++i)     // add mu weights on constrained variables
    if (!std::isfinite(1./sigmas_(i)))  // i.e. sigmas_(i) == 0, a hard constraint
      w(i) = v(i) * sqrt(mu_(i));
  return w.dot(w);
}

/* ************************************************************************* */
void Constrained::WhitenInPlace(Eigen::Block<Matrix> H) const {
  // selective scaling
  // Scale row i of H by invsigmas[i], basically multiplying H with diag(invsigmas).
  // If invsigmas[i] is inf, i.e. sigmas[i] = 0, indicating a hard constraint,
  // we leave H's row i in place.
  const Vector& _invsigmas = invsigmas();
  for(DenseIndex row = 0; row < _invsigmas.size(); ++row)
    if(isfinite(_invsigmas(row)))
      H.row(row) *= _invsigmas(row);
}

/* ************************************************************************* */
Constrained::shared_ptr Constrained::unit(size_t augmentedDim) const {
  Vector sigmas = ones(dim()+augmentedDim);
  for (size_t i = 0; i < dim(); ++i)
    if (this->sigmas_(i) == 0.0)
      sigmas(i) = 0.0;
  Vector augmentedMu = zero(dim()+augmentedDim);
  subInsert(augmentedMu, mu_, 0);
  return MixedSigmas(augmentedMu, sigmas);
}
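/* Illustrative usage sketch (not part of the library build): a zero sigma
 * marks that dimension as a hard constraint. MixedSigmas(), defined elsewhere
 * in the library, builds such a model, and whiten() above divides only the
 * unconstrained entries by their sigmas, leaving constrained entries as-is.
 * Numbers are made up for illustration.
 *
 *   Vector sigmas = (Vector(3) << 0.1, 0.0, 0.1).finished(); // middle row constrained
 *   Constrained::shared_ptr model = Constrained::MixedSigmas(sigmas);
 *   Vector w = model->whiten((Vector(3) << 1.0, 2.0, 3.0).finished());
 *   // w = [10, 2, 30]: the constrained entry passes through unscaled
 */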
/* ************************************************************************* */
// Special version of QR for Constrained calls slower but smarter code
// that deals with possibly zero sigmas
// It is Gram-Schmidt orthogonalization rather than Householder
// Previously Diagonal::QR
SharedDiagonal Constrained::QR(Matrix& Ab) const {
  bool verbose = false;
  if (verbose) cout << "\nStarting Constrained::QR" << endl;

  // get size(A) and maxRank
  size_t m = Ab.rows(), n = Ab.cols()-1;
  size_t maxRank = min(m,n);

  // create storage for [R d]
  typedef boost::tuple<size_t, Vector, double> Triple;
  list<Triple> Rd;

  Vector pseudo(m); // allocate storage for pseudo-inverse
  Vector invsigmas = reciprocal(sigmas_);

  // Obtain the sign of each element.
  // We use negative signs to denote inequality constraints
  // TODO: might be slow!
  Vector signs(sigmas_.size());
  for (size_t s = 0; s < (size_t) sigmas_.size(); ++s)

/* ************************************************************************* */
// Isotropic
/* ************************************************************************* */
void Isotropic::WhitenInPlace(Eigen::Block<Matrix> H) const {
  H *= invsigma_;
}

/* ************************************************************************* */
// Unit
/* ************************************************************************* */
void Unit::print(const std::string& name) const {
  cout << name << "unit (" << dim_ << ") " << endl;
}

/* ************************************************************************* */
// M-Estimator
/* ************************************************************************* */

namespace mEstimator {

/** produce a weight vector according to an error vector and the implemented
 * robust function */
Vector Base::weight(const Vector &error) const {
  const size_t n = error.rows();
  Vector w(n);
  for ( size_t i = 0 ; i < n ; ++i )
    w(i) = weight(error(i));
  return w;
}

/** square root version of the weight function */
Vector Base::sqrtWeight(const Vector &error) const {
  const size_t n = error.rows();
  Vector w(n);
  for ( size_t i = 0 ; i < n ; ++i )
    w(i) = sqrtWeight(error(i));
  return w;
}

/** The following functions reweight block matrices and a vector
 * according to their weight implementation */
void Base::reweight(Vector& error) const {
  if (reweight_ == Block) {
    const double w = sqrtWeight(error.norm());
    error *= w;
  } else {
    const Vector w = sqrtWeight(error);
    error.array() *= w.array();
  }
}

/** Reweight n block matrices with one error vector */
void Base::reweight(vector<Matrix> &A, Vector &error) const {
  if ( reweight_ == Block ) {
    const double w = sqrtWeight(error.norm());
    BOOST_FOREACH(Matrix& Aj, A) {
      Aj *= w;
    }
    error *= w;
  } else {
    const Vector W = sqrtWeight(error);
    BOOST_FOREACH(Matrix& Aj, A) {
      vector_scale_inplace(W,Aj);
    }
    error = emul(W, error);
  }
}

/** Reweight one block matrix with one error vector */
void Base::reweight(Matrix &A, Vector &error) const {
  if ( reweight_ == Block ) {
    const double w = sqrtWeight(error.norm());
    A *= w;
    error *= w;
  } else {
    const Vector W = sqrtWeight(error);
    vector_scale_inplace(W,A);
    error = emul(W, error);
  }
}

/** Reweight two block matrices with one error vector */
void Base::reweight(Matrix &A1, Matrix &A2, Vector &error) const {
  if ( reweight_ == Block ) {
    const double w = sqrtWeight(error.norm());
    A1 *= w;
    A2 *= w;
    error *= w;
  } else {
    const Vector W = sqrtWeight(error);
    vector_scale_inplace(W,A1);
    vector_scale_inplace(W,A2);
    error = emul(W, error);
  }
}

/** Reweight three block matrices with one error vector */
void Base::reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const {
  if ( reweight_ == Block ) {
    const double w = sqrtWeight(error.norm());
    A1 *= w;
    A2 *= w;
    A3 *= w;
    error *= w;
  } else {
    const Vector W = sqrtWeight(error);
    vector_scale_inplace(W,A1);
    vector_scale_inplace(W,A2);
    vector_scale_inplace(W,A3);
    error = emul(W, error);
  }
}

/* ************************************************************************* */
// Null model
/* ************************************************************************* */
void Null::print(const std::string &s="") const {
  cout << s << "null ()" << endl;
}

Null::shared_ptr Null::Create() {
  return shared_ptr(new Null());
}
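/* Illustrative sketch (not part of the library build): reweight() above either
 * scales a whole block by one weight computed from the norm of the error
 * (ReweightScheme Block), or scales each row by its own weight (the
 * element-wise scheme). Both the Jacobian A and the error b are scaled by
 * sqrt(weight), so the weighted least-squares problem matches the robust cost.
 * The estimator, threshold, and numbers below are for illustration only, and
 * the reweight-scheme argument of Create is assumed to have a default.
 *
 *   mEstimator::Huber::shared_ptr est = mEstimator::Huber::Create(1.345);
 *   Vector e = (Vector(3) << 0.1, -2.0, 0.4).finished();
 *   Vector w = est->weight(e);   // per-element robust weights
 *   Matrix A = Matrix::Identity(3, 3);
 *   est->reweight(A, e);         // scales A and e in place by sqrt(weight)
 */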
forced to 1.0" << endl; c_ = 1.0; } } /* ************************************************************************* */ // Fair /* ************************************************************************* */ double Fair::weight(const double &error) const { return 1.0 / (1.0 + fabs(error)/c_); } void Fair::print(const std::string &s="") const { cout << s << "fair (" << c_ << ")" << endl; } bool Fair::equals(const Base &expected, const double tol) const { const Fair* p = dynamic_cast (&expected); if (p == NULL) return false; return fabs(c_ - p->c_ ) < tol; } Fair::shared_ptr Fair::Create(const double c, const ReweightScheme reweight) { return shared_ptr(new Fair(c, reweight)); } /* ************************************************************************* */ // Huber /* ************************************************************************* */ Huber::Huber(const double k, const ReweightScheme reweight) : Base(reweight), k_(k) { if ( k_ <= 0 ) { cout << "mEstimator Huber takes only positive double in constructor. forced to 1.0" << endl; k_ = 1.0; } } double Huber::weight(const double &error) const { return (error < k_) ? (1.0) : (k_ / fabs(error)); } void Huber::print(const std::string &s="") const { cout << s << "huber (" << k_ << ")" << endl; } bool Huber::equals(const Base &expected, const double tol) const { const Huber* p = dynamic_cast(&expected); if (p == NULL) return false; return fabs(k_ - p->k_) < tol; } Huber::shared_ptr Huber::Create(const double c, const ReweightScheme reweight) { return shared_ptr(new Huber(c, reweight)); } /* ************************************************************************* */ // Cauchy /* ************************************************************************* */ Cauchy::Cauchy(const double k, const ReweightScheme reweight) : Base(reweight), k_(k) { if ( k_ <= 0 ) { cout << "mEstimator Cauchy takes only positive double in constructor. 
forced to 1.0" << endl; k_ = 1.0; } } double Cauchy::weight(const double &error) const { return k_*k_ / (k_*k_ + error*error); } void Cauchy::print(const std::string &s="") const { cout << s << "cauchy (" << k_ << ")" << endl; } bool Cauchy::equals(const Base &expected, const double tol) const { const Cauchy* p = dynamic_cast(&expected); if (p == NULL) return false; return fabs(k_ - p->k_) < tol; } Cauchy::shared_ptr Cauchy::Create(const double c, const ReweightScheme reweight) { return shared_ptr(new Cauchy(c, reweight)); } /* ************************************************************************* */ // Tukey /* ************************************************************************* */ Tukey::Tukey(const double c, const ReweightScheme reweight) : Base(reweight), c_(c) { } double Tukey::weight(const double &error) const { if (fabs(error) <= c_) { double xc2 = (error/c_)*(error/c_); double one_xc22 = (1.0-xc2)*(1.0-xc2); return one_xc22; } return 0.0; } void Tukey::print(const std::string &s="") const { std::cout << s << ": Tukey (" << c_ << ")" << std::endl; } bool Tukey::equals(const Base &expected, const double tol) const { const Tukey* p = dynamic_cast(&expected); if (p == NULL) return false; return fabs(c_ - p->c_) < tol; } Tukey::shared_ptr Tukey::Create(const double c, const ReweightScheme reweight) { return shared_ptr(new Tukey(c, reweight)); } /* ************************************************************************* */ // Welsh /* ************************************************************************* */ Welsh::Welsh(const double c, const ReweightScheme reweight) : Base(reweight), c_(c) { } double Welsh::weight(const double &error) const { double xc2 = (error/c_)*(error/c_); return std::exp(-xc2); } void Welsh::print(const std::string &s="") const { std::cout << s << ": Welsh (" << c_ << ")" << std::endl; } bool Welsh::equals(const Base &expected, const double tol) const { const Welsh* p = dynamic_cast(&expected); if (p == NULL) return false; return fabs(c_ - p->c_) < tol; } Welsh::shared_ptr Welsh::Create(const double c, const ReweightScheme reweight) { return shared_ptr(new Welsh(c, reweight)); } } // namespace mEstimator /* ************************************************************************* */ // Robust /* ************************************************************************* */ void Robust::print(const std::string& name) const { robust_->print(name); noise_->print(name); } bool Robust::equals(const Base& expected, double tol) const { const Robust* p = dynamic_cast (&expected); if (p == NULL) return false; return noise_->equals(*p->noise_,tol) && robust_->equals(*p->robust_,tol); } void Robust::WhitenSystem(Vector& b) const { noise_->whitenInPlace(b); robust_->reweight(b); } void Robust::WhitenSystem(vector& A, Vector& b) const { noise_->WhitenSystem(A,b); robust_->reweight(A,b); } void Robust::WhitenSystem(Matrix& A, Vector& b) const { noise_->WhitenSystem(A,b); robust_->reweight(A,b); } void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const { noise_->WhitenSystem(A1,A2,b); robust_->reweight(A1,A2,b); } void Robust::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const{ noise_->WhitenSystem(A1,A2,A3,b); robust_->reweight(A1,A2,A3,b); } Robust::shared_ptr Robust::Create( const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){ return shared_ptr(new Robust(robust,noise)); } /* ************************************************************************* */ } } // gtsam