Factor-graph-specific QR factorization now done by the NoiseModel: slow Gram-Schmidt for Constrained, fast Householder for any other (Gaussian) model.

release/4.3a0
Frank Dellaert 2010-01-19 19:06:02 +00:00
parent 6f2b5a904a
commit 3cdbaf81c7
3 changed files with 167 additions and 4 deletions
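In outline: for a plain Gaussian model every sigma is finite, so the augmented system [A b] can be whitened with the square-root information matrix and then triangularized in place by Householder reflections, leaving a Unit noise model on the result. A sketch, assuming only the usual weighted least-squares convention $\|e\|^2_\Sigma = e^\top \Sigma^{-1} e$:

$$
\bar A = \Sigma^{-1/2} A, \qquad \bar b = \Sigma^{-1/2} b, \qquad
Q^\top [\,\bar A \;\; \bar b\,] = [\,R \;\; d\,], \qquad
\|Ax - b\|^2_\Sigma = \|Rx - d\|^2 + \text{const}.
$$

When some sigmas are zero this whitening matrix does not exist, so the Constrained model instead eliminates one column at a time with a weighted Gram-Schmidt step (see Constrained::QR in the diff below).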

NoiseModel.cpp

@@ -8,13 +8,15 @@
#include <limits>
#include <iostream>
#include <boost/numeric/ublas/lu.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/foreach.hpp>
#include "NoiseModel.h"
namespace ublas = boost::numeric::ublas;
typedef ublas::matrix_column<Matrix> column;
static double inf = std::numeric_limits<double>::infinity();
using namespace std;
namespace gtsam {
@@ -53,6 +55,24 @@ namespace gtsam {
H = sqrt_information_ * H;
}
// General QR, see also special version in Constrained
sharedDiagonal Gaussian::QR(Matrix& Ab) const {
// get size(A) and maxRank
// TODO: really no rank problems ?
size_t m = Ab.size1(), n = Ab.size2()-1;
size_t maxRank = min(m,n);
// pre-whiten everything (cheaply if possible)
WhitenInPlace(Ab);
// Perform in-place Householder
// TODO: think about 1 on diagonal.
// Currently, GaussianConditional assumes unit upper
householder_(Ab, maxRank);
return Unit::Create(maxRank);
}
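// A minimal usage sketch (not part of this change; it uses only names that appear
// elsewhere in this diff: Matrix_, Vector_, noiseModel::Diagonal::Sigmas, Unit):
//
//   Matrix Ab = Matrix_(2, 2+1,
//       1.0, 0.0,  0.5,
//       0.0, 1.0, -0.5);
//   sharedDiagonal model = noiseModel::Diagonal::Sigmas(Vector_(2, 0.1, 0.2));
//   sharedDiagonal post = model->QR(Ab);
//
// Ab is overwritten with [R d] in place, and post is Unit::Create(2), because the
// whitening step has already absorbed the sigmas into R and d.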
/* ************************************************************************* */
// TODO: can we avoid calling reciprocal twice ?
Diagonal::Diagonal(const Vector& sigmas) :
@@ -99,6 +119,90 @@ namespace gtsam {
throw logic_error("noiseModel::Constrained cannot Whiten");
}
/* ************************************************************************* */
// update A, b
// A' \define A_{S} - a*r and b' \define b - a*d
// Linear algebra: takes away projection on latest orthogonal
// Graph: make a new factor on the separator S
// __attribute__ ((noinline)) // uncomment to prevent inlining when profiling
static void updateAb(Matrix& Ab, int j, const Vector& a, const Vector& rd) {
size_t m = Ab.size1(), n = Ab.size2()-1;
for (int i = 0; i < m; i++) { // update all rows
double ai = a(i);
double *Aij = Ab.data().begin() + i * (n+1) + j + 1;
const double *rptr = rd.data().begin() + j + 1;
// Ab(i,j+1:end) -= ai*rd(j+1:end)
for (int j2 = j + 1; j2 < n+1; j2++, Aij++, rptr++)
*Aij -= ai * (*rptr);
}
}
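// Gloss (not in the change itself): in matrix form the double loop above is the
// rank-one, outer-product update
//     Ab(:, j+1:n) -= a * rd(j+1:n)'
// so every remaining column, and the rhs, loses its weighted projection on the
// column a just eliminated -- exactly the A' = A_S - a*r, b' = b - a*d step
// announced in the comment before updateAb.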
// Special version of QR for Constrained calls slower but smarter code
// that deals with possibly zero sigmas
// It is Gram-Schmidt orthogonalization rather than Householder
sharedDiagonal Constrained::QR(Matrix& Ab) const {
// get size(A) and maxRank
size_t m = Ab.size1(), n = Ab.size2()-1;
size_t maxRank = min(m,n);
// create storage for [R d]
typedef boost::tuple<size_t, Vector, double> Triple;
list<Triple> Rd;
Vector pseudo(m); // allocate storage for pseudo-inverse
Vector weights = emul(invsigmas_,invsigmas_); // calculate weights once
// We loop over all columns, because the columns that can be eliminated
// are not necessarily contiguous. For each one, estimate the corresponding
// scalar variable x as d-rS, with S the separator (remaining columns).
// Then update A and b by substituting x with d-rS, zero-ing out x's column.
for (size_t j=0; j<n; ++j) {
// extract the first column of A
Vector a(column(Ab, j)); // ublas::matrix_column is slower ! TODO Really, why ????
// Calculate weighted pseudo-inverse and corresponding precision
double precision = weightedPseudoinverse(a, weights, pseudo);
// If precision is zero, no information on this column
// This is actually not limited to constraints, could happen in Gaussian::QR
// In that case, we're probably hosed. TODO: make sure Householder is rank-revealing
if (precision < 1e-8) continue;
// create solution [r d], rhs is automatically r(n)
Vector rd(n+1); // uninitialized !
rd(j)=1.0; // put 1 on diagonal
for (size_t j2=j+1; j2<n+1; ++j2) // and fill in remainder with dot-products
rd(j2) = inner_prod(pseudo, ublas::matrix_column<Matrix>(Ab, j2));
// construct solution (r, d, sigma)
Rd.push_back(boost::make_tuple(j, rd, precision));
// exit after rank exhausted
if (Rd.size()>=maxRank) break;
// update Ab, expensive, using outer product
updateAb(Ab, j, a, rd);
}
// Create storage for precisions
Vector precisions(Rd.size());
// Write back result in Ab, imperative as we are
// TODO: test that this is correct if a column was skipped !!!!
size_t i = 0; // start with first row
BOOST_FOREACH(const Triple& t, Rd) {
const size_t& j = t.get<0>();
const Vector& rd = t.get<1>();
precisions(i) = t.get<2>();
for (size_t j2=0; j2<j; ++j2) Ab(i,j2) = 0.0; // fill in zeros below diagonal anyway
for (size_t j2=j; j2<n+1; ++j2) // copy the j-th row TODO memcpy
Ab(i,j2) = rd(j2);
i+=1;
}
return Diagonal::Precisions(precisions);
}
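// Gloss (not in the change itself), assuming weightedPseudoinverse(a, w, pseudo)
// returns precision = a'*diag(w)*a and fills pseudo = diag(w)*a / precision, with
// infinite weights (zero sigmas) treated as hard constraints: each pass solves the
// weighted least-squares problem for the single scalar x_j given the separator S,
//     x_j = pseudo'*b - pseudo'*A_S*x_S  =  d - r*x_S,
// stores that row [r d] together with its precision, and then lets updateAb
// substitute the estimate back into the remaining columns -- a column-by-column
// weighted Gram-Schmidt elimination, in contrast to the Householder path in
// Gaussian::QR above.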
/* ************************************************************************* */
void Isotropic::print(const string& name) const {
@@ -128,7 +232,7 @@
/* ************************************************************************* */
void Unit::print(const std::string& name) const {
cout << "Unit " << name << endl;                    // before this change
cout << "Unit (" << dim_ << ") " << name << endl;   // after this change
}
/* ************************************************************************* */

NoiseModel.h

@@ -15,6 +15,9 @@
namespace gtsam { namespace noiseModel {
class Diagonal;
typedef boost::shared_ptr<Diagonal> sharedDiagonal;
/**
* noiseModel::Base is the abstract base class for all noise models.
*
@@ -130,6 +133,15 @@ namespace gtsam { namespace noiseModel {
whitenInPlace(b);
}
/**
* Apply appropriately weighted QR factorization to the system [A b]
* Q' * [A b] = [R d]
* Dimensions: (r*m) * m*(n+1) = r*(n+1)
* @param Ab is the m*(n+1) augmented system matrix [A b]
* @return in-place QR factorization [R d]. Below-diagonal is undefined !!!!!
*/
virtual sharedDiagonal QR(Matrix& Ab) const;
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
@@ -198,6 +210,7 @@ namespace gtsam { namespace noiseModel {
}; // Diagonal
/**
* A Constrained noise model is a specialization of Diagonal which allows
* some or all of the sigmas to be zero, forcing the error to be zero there.
@@ -209,6 +222,9 @@ namespace gtsam { namespace noiseModel {
class Constrained : public Diagonal {
protected:
// Constrained does not have member variables
// Instead (possibly zero) sigmas are stored in Diagonal Base class
/** protected constructor takes sigmas */
Constrained(const Vector& sigmas) :Diagonal(sigmas) {}
@@ -240,6 +256,11 @@ namespace gtsam { namespace noiseModel {
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
/**
* Apply QR factorization to the system [A b], taking into account constraints
*/
virtual sharedDiagonal QR(Matrix& Ab) const;
}; // Constrained
/**
@@ -340,7 +361,5 @@ namespace gtsam { namespace noiseModel {
#endif
};
typedef Diagonal::shared_ptr sharedDiagonal;
} // namespace gtsam

testNoiseModel.cpp

@@ -133,6 +133,46 @@ TEST(NoiseModel, ConstrainedAll )
DOUBLES_EQUAL(0.0,i->Mahalanobis(feasible),1e-9);
}
/* ************************************************************************* */
TEST( NoiseModel, QR )
{
// create a matrix to eliminate
Matrix Ab1 = Matrix_(4, 6+1,
-1., 0., 1., 0., 0., 0., -0.2,
0., -1., 0., 1., 0., 0., 0.3,
1., 0., 0., 0., -1., 0., 0.2,
0., 1., 0., 0., 0., -1., -0.1);
Matrix Ab2 = Ab1; // otherwise overwritten !
Vector sigmas = Vector_(4, 0.2, 0.2, 0.1, 0.1);
// Expected result
Vector expectedSigmas = Vector_(4, 0.0894427, 0.0894427, 0.223607, 0.223607);
sharedDiagonal expectedModel = noiseModel::Diagonal::Sigmas(expectedSigmas);
// Call Gaussian version
sharedDiagonal diagonal = noiseModel::Diagonal::Sigmas(sigmas);
sharedDiagonal actual1 = diagonal->QR(Ab1);
sharedDiagonal expected = noiseModel::Unit::Create(4);
CHECK(assert_equal(*expected,*actual1));
Matrix expectedRd1 = Matrix_(4, 6+1,
11.1803, 0.0, -2.23607, 0.0, -8.94427, 0.0, 2.23607,
0.0, 11.1803, 0.0, -2.23607, 0.0, -8.94427,-1.56525,
-0.618034, 0.0, 4.47214, 0.0, -4.47214, 0.0, 0.0,
0.0, -0.618034, 0.0, 4.47214, 0.0, -4.47214, 0.894427);
CHECK(assert_equal(expectedRd1,Ab1,1e-4)); // Ab was modified in place !!!
// Call Constrained version
sharedDiagonal constrained = noiseModel::Constrained::Mixed(sigmas);
sharedDiagonal actual2 = constrained->QR(Ab2);
CHECK(assert_equal(*expectedModel,*actual2));
Matrix expectedRd2 = Matrix_(4, 6+1,
1., 0., -0.2, 0., -0.8, 0., 0.2,
0., 1., 0.,-0.2, 0., -0.8,-0.14,
0., 0., 1., 0., -1., 0., 0.0,
0., 0., 0., 1., 0., -1., 0.2);
CHECK(assert_equal(expectedRd2,Ab2,1e-6)); // Ab was modified in place !!!
}
/* ************************************************************************* */
int main() {
TestResult tr;