From 3cdbaf81c71af9133ac8122b93b511221ad2f04e Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Tue, 19 Jan 2010 19:06:02 +0000
Subject: [PATCH] Factorgraph-specific QR factorization now done by the
 NoiseModel: slow Gram-Schmidt for Constrained, fast Householder for any
 other (Gaussian) model.

---
 cpp/NoiseModel.cpp     | 108 ++++++++++++++++++++++++++++++++++++++++-
 cpp/NoiseModel.h       |  23 ++++++++-
 cpp/testNoiseModel.cpp |  40 +++++++++++++++
 3 files changed, 167 insertions(+), 4 deletions(-)

diff --git a/cpp/NoiseModel.cpp b/cpp/NoiseModel.cpp
index 39bc4801d..8745fbd9f 100644
--- a/cpp/NoiseModel.cpp
+++ b/cpp/NoiseModel.cpp
@@ -8,13 +8,15 @@
 #include <limits>
 #include <boost/numeric/ublas/matrix_proxy.hpp>
+#include <list>
+#include <boost/foreach.hpp>
+#include <boost/tuple/tuple.hpp>
 #include "NoiseModel.h"
 
 namespace ublas = boost::numeric::ublas;
 typedef ublas::matrix_column<Matrix> column;
 
 static double inf = std::numeric_limits<double>::infinity();
-
 using namespace std;
 
 namespace gtsam {
@@ -53,6 +55,24 @@ namespace gtsam {
     H = sqrt_information_ * H;
   }
 
+  // General QR, see also special version in Constrained
+  sharedDiagonal Gaussian::QR(Matrix& Ab) const {
+    // get size(A) and maxRank
+    // TODO: really no rank problems ?
+    size_t m = Ab.size1(), n = Ab.size2()-1;
+    size_t maxRank = min(m,n);
+
+    // pre-whiten everything (cheaply if possible)
+    WhitenInPlace(Ab);
+
+    // Perform in-place Householder
+    // TODO: think about 1 on diagonal.
+    // Currently, GaussianConditional assumes unit upper
+    householder_(Ab, maxRank);
+
+    return Unit::Create(maxRank);
+  }
+
   /* ************************************************************************* */
   // TODO: can we avoid calling reciprocal twice ?
   Diagonal::Diagonal(const Vector& sigmas) :
@@ -99,6 +119,90 @@ namespace gtsam {
     throw logic_error("noiseModel::Constrained cannot Whiten");
   }
 
+  /* ************************************************************************* */
+  // update A, b
+  // A' \define A_{S}-ar and b'\define b-ad
+  // Linear algebra: takes away projection on latest orthogonal
+  // Graph: make a new factor on the separator S
+  // __attribute__ ((noinline)) // uncomment to prevent inlining when profiling
+  static void updateAb(Matrix& Ab, int j, const Vector& a, const Vector& rd) {
+    size_t m = Ab.size1(), n = Ab.size2()-1;
+    for (int i = 0; i < m; i++) { // update all rows
+      double ai = a(i);
+      double *Aij = Ab.data().begin() + i * (n+1) + j + 1;
+      const double *rptr = rd.data().begin() + j + 1;
+      // Ab(i,j+1:end) -= ai*rd(j+1:end)
+      for (int j2 = j + 1; j2 < n+1; j2++, Aij++, rptr++)
+        *Aij -= ai * (*rptr);
+    }
+  }
+
+  // Special version of QR for Constrained calls slower but smarter code
+  // that deals with possibly zero sigmas
+  // It is Gram-Schmidt orthogonalization rather than Householder
+  sharedDiagonal Constrained::QR(Matrix& Ab) const {
+    // get size(A) and maxRank
+    size_t m = Ab.size1(), n = Ab.size2()-1;
+    size_t maxRank = min(m,n);
+
+    // create storage for [R d]
+    typedef boost::tuple<size_t, Vector, double> Triple;
+    list<Triple> Rd;
+
+    Vector pseudo(m); // allocate storage for pseudo-inverse
+    Vector weights = emul(invsigmas_,invsigmas_); // calculate weights once
+
+    // We loop over all columns, because the columns that can be eliminated
+    // are not necessarily contiguous. For each one, estimate the corresponding
+    // scalar variable x as d-rS, with S the separator (remaining columns).
+    // Then update A and b by substituting x with d-rS, zero-ing out x's column.
+    for (size_t j=0; j<n; ++j) {
+
+      // extract the j-th column of A
+      Vector a(column(Ab, j));
+
+      // calculate the weighted pseudo-inverse of a and the corresponding precision
+      double precision = weightedPseudoinverse(a, weights, pseudo);
+
+      // if precision is zero, there is no information on this column: skip it
+      if (precision < 1e-8) continue;
+
+      // create solution [r d] for this column, with 1 on the diagonal
+      Vector rd(n+1);
+      rd(j) = 1.0;
+      // fill in the rest with dot-products of the pseudo-inverse and the remaining columns
+      for (size_t j2=j+1; j2<n+1; ++j2)
+        rd(j2) = inner_prod(pseudo, ublas::matrix_column<Matrix>(Ab, j2));
+
+      // construct solution (r, d, sigma)
+      Rd.push_back(boost::make_tuple(j, rd, precision));
+
+      // exit after rank exhausted
+      if (Rd.size()>=maxRank) break;
+
+      // update Ab, expensive, using outer product
+      updateAb(Ab, j, a, rd);
+    }
+
+    // Create storage for precisions
+    Vector precisions(Rd.size());
+
+    // Write back result in Ab, imperative as we are
+    // TODO: test that is correct if a column was skipped !!!!
+    size_t i = 0; // start with first row
+    BOOST_FOREACH(const Triple& t, Rd) {
+      const size_t& j = t.get<0>();
+      const Vector& rd = t.get<1>();
+      precisions(i) = t.get<2>();
+      for (size_t j2=0; j2<j; ++j2)
+        Ab(i,j2) = 0.0; // zero out entries left of the diagonal
+      for (size_t j2=j; j2<n+1; ++j2)
+        Ab(i,j2) = rd(j2); // copy [r d] into row i
+      i += 1;
+    }
+
+    // return a diagonal noise model constructed from the computed precisions
+    return Diagonal::Precisions(precisions);
+  }
+
diff --git a/cpp/NoiseModel.h b/cpp/NoiseModel.h
--- a/cpp/NoiseModel.h
+++ b/cpp/NoiseModel.h
@@ ... @@ namespace gtsam { namespace noiseModel {
+  class Diagonal; // forward declaration
+  typedef boost::shared_ptr<Diagonal> sharedDiagonal;
+
 /**
  * noiseModel::Base is the abstract base class for all noise models.
  *
@@ -130,6 +133,15 @@ namespace gtsam { namespace noiseModel {
       whitenInPlace(b);
     }
 
+    /**
+     * Apply appropriately weighted QR factorization to the system [A b]
+     *   Q' * [A b] = [R d]
+     * Dimensions: (r*m) * m*(n+1) = r*(n+1)
+     * @param Ab is the m*(n+1) augmented system matrix [A b]
+     * @return in-place QR factorization [R d]. Below-diagonal is undefined !!!!!
+     */
+    virtual sharedDiagonal QR(Matrix& Ab) const;
+
     /**
      * Return R itself, but note that Whiten(H) is cheaper than R*H
      */
@@ -198,6 +210,7 @@ namespace gtsam { namespace noiseModel {
   }; // Diagonal
+
   /**
   * A Constrained constrained model is a specialization of Diagonal which allows
   * some or all of the sigmas to be zero, forcing the error to be zero there.
   *
@@ -209,6 +222,9 @@ namespace gtsam { namespace noiseModel {
   class Constrained : public Diagonal {
   protected:
 
+    // Constrained does not have member variables
+    // Instead (possibly zero) sigmas are stored in the Diagonal base class
+
     /** protected constructor takes sigmas */
     Constrained(const Vector& sigmas) :Diagonal(sigmas) {}
 
@@ -240,6 +256,11 @@ namespace gtsam { namespace noiseModel {
     virtual Matrix Whiten(const Matrix& H) const;
     virtual void WhitenInPlace(Matrix& H) const;
 
+    /**
+     * Apply QR factorization to the system [A b], taking into account constraints
+     */
+    virtual sharedDiagonal QR(Matrix& Ab) const;
+
   }; // Constrained
 
   /**
@@ -340,7 +361,5 @@ namespace gtsam { namespace noiseModel {
 #endif
   };
 
-  typedef Diagonal::shared_ptr sharedDiagonal;
-
 } // namespace gtsam
 
diff --git a/cpp/testNoiseModel.cpp b/cpp/testNoiseModel.cpp
index 8cd363028..f4a8ca76f 100644
--- a/cpp/testNoiseModel.cpp
+++ b/cpp/testNoiseModel.cpp
@@ -133,6 +133,46 @@ TEST(NoiseModel, ConstrainedAll )
   DOUBLES_EQUAL(0.0,i->Mahalanobis(feasible),1e-9);
 }
 
+/* ************************************************************************* */
+TEST( NoiseModel, QR )
+{
+  // create a matrix to eliminate
+  Matrix Ab1 = Matrix_(4, 6+1,
+      -1., 0., 1., 0., 0., 0., -0.2,
+      0., -1., 0., 1., 0., 0., 0.3,
+      1., 0., 0., 0., -1., 0., 0.2,
+      0., 1., 0., 0., 0., -1., -0.1);
+  Matrix Ab2 = Ab1; // otherwise overwritten !
+  Vector sigmas = Vector_(4, 0.2, 0.2, 0.1, 0.1);
+
+  // Expected result
+  Vector expectedSigmas = Vector_(4, 0.0894427, 0.0894427, 0.223607, 0.223607);
+  sharedDiagonal expectedModel = noiseModel::Diagonal::Sigmas(expectedSigmas);
+
+  // Call Gaussian version
+  sharedDiagonal diagonal = noiseModel::Diagonal::Sigmas(sigmas);
+  sharedDiagonal actual1 = diagonal->QR(Ab1);
+  sharedDiagonal expected = noiseModel::Unit::Create(4);
+  CHECK(assert_equal(*expected,*actual1));
+  Matrix expectedRd1 = Matrix_(4, 6+1,
+      11.1803, 0.0, -2.23607, 0.0, -8.94427, 0.0, 2.23607,
+      0.0, 11.1803, 0.0, -2.23607, 0.0, -8.94427, -1.56525,
+      -0.618034, 0.0, 4.47214, 0.0, -4.47214, 0.0, 0.0,
+      0.0, -0.618034, 0.0, 4.47214, 0.0, -4.47214, 0.894427);
+  CHECK(assert_equal(expectedRd1,Ab1,1e-4)); // Ab was modified in place !!!
+
+  // Call Constrained version
+  sharedDiagonal constrained = noiseModel::Constrained::Mixed(sigmas);
+  sharedDiagonal actual2 = constrained->QR(Ab2);
+  CHECK(assert_equal(*expectedModel,*actual2));
+  Matrix expectedRd2 = Matrix_(4, 6+1,
+      1., 0., -0.2, 0., -0.8, 0., 0.2,
+      0., 1., 0., -0.2, 0., -0.8, -0.14,
+      0., 0., 1., 0., -1., 0., 0.0,
+      0., 0., 0., 1., 0., -1., 0.2);
+  CHECK(assert_equal(expectedRd2,Ab2,1e-6)); // Ab was modified in place !!!
+}
+
 /* ************************************************************************* */
 int main() { TestResult tr;