Integrated householder-based QR into NoiseModel. Note that the examples for GFG have changed to ensure that they are actually a linearized version of the nonlinear example.

release/4.3a0
Alex Cunningham 2010-01-27 04:39:35 +00:00
parent 98b825ddbd
commit 59c7ce7e29
23 changed files with 999 additions and 975 deletions

View File

@ -29,7 +29,7 @@ namespace gtsam {
GaussianBayesNet scalarGaussian(const Symbol& key, double mu, double sigma) {
GaussianBayesNet bn;
GaussianConditional::shared_ptr
conditional(new GaussianConditional(key, Vector_(1,mu), eye(1), Vector_(1,sigma)));
conditional(new GaussianConditional(key, Vector_(1,mu)/sigma, eye(1)/sigma, ones(1)));
bn.push_back(conditional);
return bn;
}
@ -39,7 +39,7 @@ GaussianBayesNet simpleGaussian(const Symbol& key, const Vector& mu, double sigm
GaussianBayesNet bn;
size_t n = mu.size();
GaussianConditional::shared_ptr
conditional(new GaussianConditional(key, mu, eye(n), repeat(n,sigma)));
conditional(new GaussianConditional(key, mu/sigma, eye(n)/sigma, ones(n)));
bn.push_back(conditional);
return bn;
}

View File

@ -57,6 +57,7 @@ GaussianFactor::GaussianFactor(const vector<shared_ptr> & factors)
size_t pos = 0; // save last position inserted into the new rhs vector
// iterate over all factors
bool constrained = false;
BOOST_FOREACH(shared_ptr factor, factors){
if (verbose) factor->print();
// number of rows for factor f
@ -72,10 +73,25 @@ GaussianFactor::GaussianFactor(const vector<shared_ptr> & factors)
// update the matrices
append_factor(factor,m,pos);
// check if there are constraints
if (verbose) factor->model_->print("Checking for zeros");
if (!constrained && factor->model_->isConstrained()) {
constrained = true;
if (verbose) cout << "Found a constraint!" << endl;
}
pos += mf;
}
if (verbose) cout << "GaussianFactor::GaussianFactor done" << endl;
model_ = noiseModel::Diagonal::Sigmas(sigmas);
if (verbose) cout << "GaussianFactor::GaussianFactor done" << endl;
if (constrained) {
model_ = noiseModel::Constrained::MixedSigmas(sigmas);
if (verbose) model_->print("Just created Constraint ^");
} else {
model_ = noiseModel::Diagonal::Sigmas(sigmas);
if (verbose) model_->print("Just created Diagonal");
}
}
/* ************************************************************************* */
@ -304,6 +320,7 @@ void GaussianFactor::append_factor(GaussianFactor::shared_ptr f, size_t m, size_
pair<GaussianConditional::shared_ptr, GaussianFactor::shared_ptr>
GaussianFactor::eliminate(const Symbol& key) const
{
bool verbose = false;
// if this factor does not involve key, we exit with empty CG and LF
const_iterator it = As_.find(key);
if (it==As_.end()) {
@ -323,7 +340,9 @@ GaussianFactor::eliminate(const Symbol& key) const
Matrix Ab = matrix_augmented(ordering,false);
// Use in-place QR on dense Ab appropriate to NoiseModel
if (verbose) model_->print("Before QR");
SharedDiagonal noiseModel = model_->QR(Ab);
if (verbose) model_->print("After QR");
// get dimensions of the eliminated variable
// TODO: this is another map find that should be avoided !
@ -355,96 +374,16 @@ GaussianFactor::eliminate(const Symbol& key) const
if (cur_key!=key) {
size_t dim = getDim(cur_key); // TODO avoid find !
conditional->add(cur_key, sub(Ab, 0, n1, j, j+dim));
factor->insert(cur_key, sub(Ab, n1, maxRank, j, j+dim));
factor->insert(cur_key, sub(Ab, n1, maxRank, j, j+dim)); // TODO: handle zeros properly
j+=dim;
}
// Set sigmas
factor->model_ = noiseModel::Diagonal::Sigmas(sub(noiseModel->sigmas(),n1,maxRank));
// extract ds vector for the new b
factor->set_b(sub(d, n1, maxRank));
return make_pair(conditional, factor);
}
/* ************************************************************************* */
/* Note, in place !!!!
* Do incomplete QR factorization for the first n columns
* We will do QR on all matrices and on RHS
* Then take first n rows and make a GaussianConditional,
* and last rows to make a new joint linear factor on separator
*/
/* ************************************************************************* *
pair<GaussianConditional::shared_ptr, GaussianFactor::shared_ptr>
GaussianFactor::eliminate(const Symbol& key) const
{
// if this factor does not involve key, we exit with empty CG and LF
const_iterator it = As_.find(key);
if (it==As_.end()) {
// Conditional Gaussian is just a parent-less node with P(x)=1
GaussianFactor::shared_ptr lf(new GaussianFactor);
GaussianConditional::shared_ptr cg(new GaussianConditional(key));
return make_pair(cg,lf);
}
// create an internal ordering that eliminates key first
Ordering ordering;
ordering += key;
BOOST_FOREACH(const Symbol& k, keys())
if (k != key) ordering += k;
// extract A, b from the combined linear factor (ensure that x is leading)
// TODO: get Ab as augmented matrix
// Matrix Ab = matrix_augmented(ordering,false);
Matrix A; Vector b;
boost::tie(A, b) = matrix(ordering, false);
size_t n = A.size2();
// Do in-place QR to get R, d of the augmented system
std::list<boost::tuple<Vector, double, double> > solution =
weighted_eliminate(A, b, model_->sigmas());
// get dimensions of the eliminated variable
// TODO: this is another map find that should be avoided !
size_t n1 = getDim(key);
// if m<n1, this factor cannot be eliminated
size_t maxRank = solution.size();
if (maxRank<n1)
throw(domain_error("GaussianFactor::eliminate: fewer constraints than unknowns"));
// unpack the solutions
Matrix R(maxRank, n);
Vector r, d(maxRank), newSigmas(maxRank); double di, sigma;
Matrix::iterator2 Rit = R.begin2();
size_t i = 0;
BOOST_FOREACH(boost::tie(r, di, sigma), solution) {
copy(r.begin(), r.end(), Rit); // copy r vector
d(i) = di; // copy in rhs
newSigmas(i) = sigma; // copy in new sigmas
Rit += n; i += 1;
}
// create base conditional Gaussian
GaussianConditional::shared_ptr conditional(new GaussianConditional(key,
sub(d, 0, n1), // form d vector
sub(R, 0, n1, 0, n1), // form R matrix
sub(newSigmas, 0, n1))); // get standard deviations
// extract the block matrices for parents in both CG and LF
GaussianFactor::shared_ptr factor(new GaussianFactor);
size_t j = n1;
BOOST_FOREACH(Symbol& cur_key, ordering)
if (cur_key!=key) {
size_t dim = getDim(cur_key);
conditional->add(cur_key, sub(R, 0, n1, j, j+dim));
factor->insert(cur_key, sub(R, n1, maxRank, j, j+dim));
j+=dim;
}
// Set sigmas
factor->model_ = noiseModel::Diagonal::Sigmas(sub(newSigmas,n1,maxRank));
// set the right model here
if (noiseModel->isConstrained())
factor->model_ = noiseModel::Constrained::MixedSigmas(sub(noiseModel->sigmas(),n1,maxRank));
else
factor->model_ = noiseModel::Diagonal::Sigmas(sub(noiseModel->sigmas(),n1,maxRank));
// extract ds vector for the new b
factor->set_b(sub(d, n1, maxRank));

View File

@ -108,6 +108,11 @@ GaussianFactorGraph::eliminate(const Ordering& ordering)
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::optimize(const Ordering& ordering)
{
bool verbose = false;
if (verbose)
BOOST_FOREACH(sharedFactor factor,factors_)
factor->get_model()->print("Starting model");
// eliminate all nodes in the given ordering -> chordal Bayes net
GaussianBayesNet chordalBayesNet = eliminate(ordering);

View File

@ -274,7 +274,13 @@ AM_LDFLAGS = -L../CppUnitLite -lCppUnitLite $(BOOST_LDFLAGS) $(boost_serializati
# TO ENABLE GSL AND ATLAS, uncomment this part!
# AM_LDFLAGS += -lgsl -lcblas -latlas
# libgtsam_la_LDFLAGS += -lgsl -lcblas -latlas
# libgtsam_la_LDFLAGS += -lgsl -lcblas -latlas # Note: order of libraries is critical
# AM_CXXFLAGS += -DGSL
# libgtsam_la_CPPFLAGS += -DGSL
# TO ENABLE JUST GSL, uncomment this part!
# AM_LDFLAGS += -lgsl -lgslcblas
# libgtsam_la_LDFLAGS += -lgsl -lgslcblas
# AM_CXXFLAGS += -DGSL
# libgtsam_la_CPPFLAGS += -DGSL

View File

@ -33,299 +33,319 @@ static double inf = std::numeric_limits<double>::infinity();
using namespace std;
namespace gtsam {
namespace noiseModel {
namespace noiseModel {
/* ************************************************************************* */
// update A, b
// A' \define A_{S}-ar and b'\define b-ad
// Linear algebra: takes away projection on latest orthogonal
// Graph: make a new factor on the separator S
// __attribute__ ((noinline)) // uncomment to prevent inlining when profiling
static void updateAb(Matrix& Ab, int j, const Vector& a, const Vector& rd) {
size_t m = Ab.size1(), n = Ab.size2()-1;
/* ************************************************************************* */
// update A, b
// A' \define A_{S}-ar and b'\define b-ad
// Linear algebra: takes away projection on latest orthogonal
// Graph: make a new factor on the separator S
// __attribute__ ((noinline)) // uncomment to prevent inlining when profiling
static void updateAb(Matrix& Ab, int j, const Vector& a, const Vector& rd) {
size_t m = Ab.size1(), n = Ab.size2()-1;
#ifdef GSL
// Ab(0:m,j+1:n) = Ab(0:m,j+1:n) - a(0:m)*rd(j+1:end)'
// get a view for Ab
gsl_matrix_view Abg = gsl_matrix_view_array(Ab.data().begin(), m, n+1);
gsl_matrix_view Abg_view = gsl_matrix_submatrix (&(Abg.matrix), 0, j+1, m, n-j);
// Ab(0:m,j+1:n) = Ab(0:m,j+1:n) - a(0:m)*rd(j+1:end)'
// get a view for Ab
gsl_matrix_view Abg = gsl_matrix_view_array(Ab.data().begin(), m, n+1);
gsl_matrix_view Abg_view = gsl_matrix_submatrix (&(Abg.matrix), 0, j+1, m, n-j);
// get a view for a
gsl_vector_const_view ag = gsl_vector_const_view_array(a.data().begin(), m);
// get a view for a
gsl_vector_const_view ag = gsl_vector_const_view_array(a.data().begin(), m);
// get a view for r
gsl_vector_const_view rdg = gsl_vector_const_view_array(rd.data().begin()+j+1, n-j);
// get a view for r
gsl_vector_const_view rdg = gsl_vector_const_view_array(rd.data().begin()+j+1, n-j);
// rank one update
gsl_blas_dger (-1.0, &(ag.vector), &(rdg.vector), &(Abg_view.matrix));
// rank one update
gsl_blas_dger (-1.0, &(ag.vector), &(rdg.vector), &(Abg_view.matrix));
#else
for (int i = 0; i < m; i++) { // update all rows
double ai = a(i);
double *Aij = Ab.data().begin() + i * (n+1) + j + 1;
const double *rptr = rd.data().begin() + j + 1;
// Ab(i,j+1:end) -= ai*rd(j+1:end)
for (int j2 = j + 1; j2 < n+1; j2++, Aij++, rptr++)
*Aij -= ai * (*rptr);
}
#endif
}
/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Covariance(const Matrix& covariance, bool smart) {
size_t m = covariance.size1(), n = covariance.size2();
if (m != n) throw invalid_argument("Gaussian::Covariance: covariance not square");
if (smart) {
// check all non-diagonal entries
int i,j;
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
if (i != j && fabs(covariance(i, j) > 1e-9)) goto full;
Vector variances(n);
for (j = 0; j < n; j++) variances(j) = covariance(j,j);
return Diagonal::Variances(variances,true);
}
full: return shared_ptr(new Gaussian(n, inverse_square_root(covariance)));
}
void Gaussian::print(const string& name) const {
gtsam::print(thisR(), "Gaussian");
}
bool Gaussian::equals(const Base& expected, double tol) const {
const Gaussian* p = dynamic_cast<const Gaussian*> (&expected);
if (p == NULL) return false;
if (typeid(*this) != typeid(*p)) return false;
//if (!sqrt_information_) return true; // ALEX todo;
return equal_with_abs_tol(R(), p->R(), sqrt(tol));
}
Vector Gaussian::whiten(const Vector& v) const {
return thisR() * v;
}
Vector Gaussian::unwhiten(const Vector& v) const {
return backSubstituteUpper(thisR(), v);
}
double Gaussian::Mahalanobis(const Vector& v) const {
// Note: for Diagonal, which does ediv_, will be correct for constraints
Vector w = whiten(v);
return inner_prod(w, w);
}
Matrix Gaussian::Whiten(const Matrix& H) const {
return thisR() * H;
}
void Gaussian::WhitenInPlace(Matrix& H) const {
H = thisR() * H;
}
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QR(Matrix& Ab) const {
// get size(A) and maxRank
// TODO: really no rank problems ?
size_t m = Ab.size1(), n = Ab.size2()-1;
size_t maxRank = min(m,n);
// pre-whiten everything (cheaply if possible)
WhitenInPlace(Ab);
// Perform in-place Householder
householder_(Ab, maxRank);
return Unit::Create(maxRank);
}
/* ************************************************************************* */
// TODO: can we avoid calling reciprocal twice ?
Diagonal::Diagonal(const Vector& sigmas) :
Gaussian(sigmas.size()), invsigmas_(reciprocal(sigmas)), sigmas_(
sigmas) {
}
Diagonal::shared_ptr Diagonal::Variances(const Vector& variances, bool smart) {
if (smart) {
// check whether all the same entry
int j, n = variances.size();
for (j = 1; j < n; j++)
if (variances(j) != variances(0)) goto full;
return Isotropic::Variance(n, variances(0), true);
}
full: return shared_ptr(new Diagonal(esqrt(variances)));
}
void Diagonal::print(const string& name) const {
gtsam::print(sigmas_, "Diagonal sigmas " + name);
}
Vector Diagonal::whiten(const Vector& v) const {
return emul(v, invsigmas_);
}
Vector Diagonal::unwhiten(const Vector& v) const {
return emul(v, sigmas_);
}
Matrix Diagonal::Whiten(const Matrix& H) const {
return vector_scale(invsigmas_, H);
}
void Diagonal::WhitenInPlace(Matrix& H) const {
H = vector_scale(invsigmas_, H);
}
Vector Diagonal::sample() const {
Vector result(dim_);
for (int i = 0; i < dim_; i++) {
typedef boost::normal_distribution<double> Normal;
Normal dist(0.0, this->sigmas_(i));
boost::variate_generator<boost::minstd_rand&, Normal> norm(generator, dist);
result(i) = norm();
}
return result;
}
/* ************************************************************************* */
void Constrained::print(const std::string& name) const {
gtsam::print(sigmas_, "Constrained sigmas " + name);
}
Vector Constrained::whiten(const Vector& v) const {
// ediv_ does the right thing with the errors
return ediv_(v, sigmas_);
}
Matrix Constrained::Whiten(const Matrix& H) const {
throw logic_error("noiseModel::Constrained cannot Whiten");
}
void Constrained::WhitenInPlace(Matrix& H) const {
throw logic_error("noiseModel::Constrained cannot Whiten");
}
// Special version of QR for Constrained calls slower but smarter code
// that deals with possibly zero sigmas
// It is Gram-Schmidt orthogonalization rather than Householder
SharedDiagonal Diagonal::QR(Matrix& Ab) const {
// get size(A) and maxRank
size_t m = Ab.size1(), n = Ab.size2()-1;
size_t maxRank = min(m,n);
// create storage for [R d]
typedef boost::tuple<size_t, Vector, double> Triple;
list<Triple> Rd;
Vector pseudo(m); // allocate storage for pseudo-inverse
Vector weights = emul(invsigmas_,invsigmas_); // calculate weights once
// We loop over all columns, because the columns that can be eliminated
// are not necessarily contiguous. For each one, estimate the corresponding
// scalar variable x as d-rS, with S the separator (remaining columns).
// Then update A and b by substituting x with d-rS, zero-ing out x's column.
for (size_t j=0; j<n; ++j) {
// extract the first column of A
// ublas::matrix_column is slower ! TODO Really, why ????
// AGC: if you use column() you will automatically call ublas, use
// column_() to actually use the one in our library
Vector a(column(Ab, j));
// Calculate weighted pseudo-inverse and corresponding precision
double precision = weightedPseudoinverse(a, weights, pseudo);
// If precision is zero, no information on this column
// This is actually not limited to constraints, could happen in Gaussian::QR
// In that case, we're probably hosed. TODO: make sure Householder is rank-revealing
if (precision < 1e-8) continue;
// create solution [r d], rhs is automatically r(n)
Vector rd(n+1); // uninitialized !
rd(j)=1.0; // put 1 on diagonal
for (size_t j2=j+1; j2<n+1; ++j2) // and fill in remainder with dot-products
rd(j2) = inner_prod(pseudo, ublas::matrix_column<Matrix>(Ab, j2));
// construct solution (r, d, sigma)
Rd.push_back(boost::make_tuple(j, rd, precision));
// exit after rank exhausted
if (Rd.size()>=maxRank) break;
// update Ab, expensive, using outer product
updateAb(Ab, j, a, rd);
}
// Create storage for precisions
Vector precisions(Rd.size());
// Write back result in Ab, imperative as we are
// TODO: test that is correct if a column was skipped !!!!
size_t i = 0; // start with first row
bool mixed = false;
BOOST_FOREACH(const Triple& t, Rd) {
const size_t& j = t.get<0>();
const Vector& rd = t.get<1>();
precisions(i) = t.get<2>();
if (precisions(i)==inf) mixed = true;
for (size_t j2=0; j2<j; ++j2) Ab(i,j2) = 0.0; // fill in zeros below diagonal anway
for (size_t j2=j; j2<n+1; ++j2) // copy the j-the row TODO memcpy
Ab(i,j2) = rd(j2);
i+=1;
}
return mixed ? Constrained::MixedPrecisions(precisions) : Diagonal::Precisions(precisions);
}
/* ************************************************************************* */
Isotropic::shared_ptr Isotropic::Variance(size_t dim, double variance, bool smart) {
if (smart && fabs(variance-1.0)<1e-9) return Unit::Create(dim);
return shared_ptr(new Isotropic(dim, sqrt(variance)));
}
void Isotropic::print(const string& name) const {
cout << "Isotropic sigma " << name << " " << sigma_ << endl;
}
double Isotropic::Mahalanobis(const Vector& v) const {
double dot = inner_prod(v, v);
return dot * invsigma_ * invsigma_;
}
Vector Isotropic::whiten(const Vector& v) const {
return v * invsigma_;
}
Vector Isotropic::unwhiten(const Vector& v) const {
return v * sigma_;
}
Matrix Isotropic::Whiten(const Matrix& H) const {
return invsigma_ * H;
}
void Isotropic::WhitenInPlace(Matrix& H) const {
H *= invsigma_;
}
// faster version
Vector Isotropic::sample() const {
typedef boost::normal_distribution<double> Normal;
Normal dist(0.0, this->sigma_);
boost::variate_generator<boost::minstd_rand&, Normal> norm(generator, dist);
Vector result(dim_);
for (int i = 0; i < dim_; i++)
result(i) = norm();
return result;
}
/* ************************************************************************* */
void Unit::print(const std::string& name) const {
cout << "Unit (" << dim_ << ") " << name << endl;
}
/* ************************************************************************* */
for (int i = 0; i < m; i++) { // update all rows
double ai = a(i);
double *Aij = Ab.data().begin() + i * (n+1) + j + 1;
const double *rptr = rd.data().begin() + j + 1;
// Ab(i,j+1:end) -= ai*rd(j+1:end)
for (int j2 = j + 1; j2 < n+1; j2++, Aij++, rptr++)
*Aij -= ai * (*rptr);
}
#endif
}
/* ************************************************************************* */
Gaussian::shared_ptr Gaussian::Covariance(const Matrix& covariance, bool smart) {
size_t m = covariance.size1(), n = covariance.size2();
if (m != n) throw invalid_argument("Gaussian::Covariance: covariance not square");
if (smart) {
// check all non-diagonal entries
int i,j;
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
if (i != j && fabs(covariance(i, j) > 1e-9)) goto full;
Vector variances(n);
for (j = 0; j < n; j++) variances(j) = covariance(j,j);
return Diagonal::Variances(variances,true);
}
full: return shared_ptr(new Gaussian(n, inverse_square_root(covariance)));
}
void Gaussian::print(const string& name) const {
gtsam::print(thisR(), "Gaussian");
}
bool Gaussian::equals(const Base& expected, double tol) const {
const Gaussian* p = dynamic_cast<const Gaussian*> (&expected);
if (p == NULL) return false;
if (typeid(*this) != typeid(*p)) return false;
//if (!sqrt_information_) return true; // ALEX todo;
return equal_with_abs_tol(R(), p->R(), sqrt(tol));
}
Vector Gaussian::whiten(const Vector& v) const {
return thisR() * v;
}
Vector Gaussian::unwhiten(const Vector& v) const {
return backSubstituteUpper(thisR(), v);
}
double Gaussian::Mahalanobis(const Vector& v) const {
// Note: for Diagonal, which does ediv_, will be correct for constraints
Vector w = whiten(v);
return inner_prod(w, w);
}
Matrix Gaussian::Whiten(const Matrix& H) const {
return thisR() * H;
}
void Gaussian::WhitenInPlace(Matrix& H) const {
H = thisR() * H;
}
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QR(Matrix& Ab) const {
bool verbose = false;
if (verbose) cout << "\nIn Gaussian::QR" << endl;
// get size(A) and maxRank
// TODO: really no rank problems ?
size_t m = Ab.size1(), n = Ab.size2()-1;
size_t maxRank = min(m,n);
// pre-whiten everything (cheaply if possible)
if (verbose) gtsam::print(Ab, "Ab before whitening");
WhitenInPlace(Ab);
if (verbose) gtsam::print(Ab, "Ab after whitening");
// Perform in-place Householder
householder(Ab, maxRank);
if (verbose) gtsam::print(Ab, "Ab before householder");
return Unit::Create(maxRank);
}
/* ************************************************************************* */
Diagonal::Diagonal(const Vector& sigmas) :
Gaussian(sigmas.size()), invsigmas_(reciprocal(sigmas)), sigmas_(sigmas) {
}
Diagonal::shared_ptr Diagonal::Variances(const Vector& variances, bool smart) {
if (smart) {
// check whether all the same entry
int j, n = variances.size();
for (j = 1; j < n; j++)
if (variances(j) != variances(0)) goto full;
return Isotropic::Variance(n, variances(0), true);
}
full: return shared_ptr(new Diagonal(esqrt(variances)));
}
Diagonal::shared_ptr Diagonal::Sigmas(const Vector& sigmas, bool smart) {
if (smart) {
// look for zeros to make a constraint
for (size_t i=0; i<sigmas.size(); ++i)
if (sigmas(i)<1e-8)
return Constrained::MixedSigmas(sigmas);
}
return Diagonal::shared_ptr(new Diagonal(sigmas));
}
void Diagonal::print(const string& name) const {
gtsam::print(sigmas_, "Diagonal sigmas " + name);
}
Vector Diagonal::whiten(const Vector& v) const {
return emul(v, invsigmas_);
}
Vector Diagonal::unwhiten(const Vector& v) const {
return emul(v, sigmas_);
}
Matrix Diagonal::Whiten(const Matrix& H) const {
return vector_scale(invsigmas_, H);
}
void Diagonal::WhitenInPlace(Matrix& H) const {
H = vector_scale(invsigmas_, H);
}
Vector Diagonal::sample() const {
Vector result(dim_);
for (int i = 0; i < dim_; i++) {
typedef boost::normal_distribution<double> Normal;
Normal dist(0.0, this->sigmas_(i));
boost::variate_generator<boost::minstd_rand&, Normal> norm(generator, dist);
result(i) = norm();
}
return result;
}
/* ************************************************************************* */
void Constrained::print(const std::string& name) const {
gtsam::print(sigmas_, "Constrained sigmas " + name);
}
Vector Constrained::whiten(const Vector& v) const {
// ediv_ does the right thing with the errors
return ediv_(v, sigmas_);
}
Matrix Constrained::Whiten(const Matrix& H) const {
throw logic_error("noiseModel::Constrained cannot Whiten");
}
void Constrained::WhitenInPlace(Matrix& H) const {
throw logic_error("noiseModel::Constrained cannot Whiten");
}
// Special version of QR for Constrained calls slower but smarter code
// that deals with possibly zero sigmas
// It is Gram-Schmidt orthogonalization rather than Householder
// Previously Diagonal::QR
SharedDiagonal Constrained::QR(Matrix& Ab) const {
bool verbose = false;
if (verbose) cout << "\nStarting Constrained::QR" << endl;
// get size(A) and maxRank
size_t m = Ab.size1(), n = Ab.size2()-1;
size_t maxRank = min(m,n);
// create storage for [R d]
typedef boost::tuple<size_t, Vector, double> Triple;
list<Triple> Rd;
Vector pseudo(m); // allocate storage for pseudo-inverse
Vector weights = emul(invsigmas_,invsigmas_); // calculate weights once
// We loop over all columns, because the columns that can be eliminated
// are not necessarily contiguous. For each one, estimate the corresponding
// scalar variable x as d-rS, with S the separator (remaining columns).
// Then update A and b by substituting x with d-rS, zero-ing out x's column.
for (size_t j=0; j<n; ++j) {
// extract the first column of A
// ublas::matrix_column is slower ! TODO Really, why ????
// AGC: if you use column() you will automatically call ublas, use
// column_() to actually use the one in our library
Vector a(column(Ab, j));
// Calculate weighted pseudo-inverse and corresponding precision
double precision = weightedPseudoinverse(a, weights, pseudo);
// If precision is zero, no information on this column
// This is actually not limited to constraints, could happen in Gaussian::QR
// In that case, we're probably hosed. TODO: make sure Householder is rank-revealing
if (precision < 1e-8) continue;
// create solution [r d], rhs is automatically r(n)
Vector rd(n+1); // uninitialized !
rd(j)=1.0; // put 1 on diagonal
for (size_t j2=j+1; j2<n+1; ++j2) // and fill in remainder with dot-products
rd(j2) = inner_prod(pseudo, ublas::matrix_column<Matrix>(Ab, j2));
// construct solution (r, d, sigma)
Rd.push_back(boost::make_tuple(j, rd, precision));
// exit after rank exhausted
if (Rd.size()>=maxRank) break;
// update Ab, expensive, using outer product
updateAb(Ab, j, a, rd);
}
// Create storage for precisions
Vector precisions(Rd.size());
// Write back result in Ab, imperative as we are
// TODO: test that is correct if a column was skipped !!!!
size_t i = 0; // start with first row
bool mixed = false;
BOOST_FOREACH(const Triple& t, Rd) {
const size_t& j = t.get<0>();
const Vector& rd = t.get<1>();
precisions(i) = t.get<2>();
if (precisions(i)==inf) mixed = true;
for (size_t j2=0; j2<j; ++j2) Ab(i,j2) = 0.0; // fill in zeros below diagonal anway
for (size_t j2=j; j2<n+1; ++j2) // copy the j-the row TODO memcpy
Ab(i,j2) = rd(j2);
i+=1;
}
return mixed ? Constrained::MixedPrecisions(precisions) : Diagonal::Precisions(precisions);
}
/* ************************************************************************* */
Isotropic::shared_ptr Isotropic::Variance(size_t dim, double variance, bool smart) {
if (smart && fabs(variance-1.0)<1e-9) return Unit::Create(dim);
return shared_ptr(new Isotropic(dim, sqrt(variance)));
}
void Isotropic::print(const string& name) const {
cout << "Isotropic sigma " << name << " " << sigma_ << endl;
}
double Isotropic::Mahalanobis(const Vector& v) const {
double dot = inner_prod(v, v);
return dot * invsigma_ * invsigma_;
}
Vector Isotropic::whiten(const Vector& v) const {
return v * invsigma_;
}
Vector Isotropic::unwhiten(const Vector& v) const {
return v * sigma_;
}
Matrix Isotropic::Whiten(const Matrix& H) const {
return invsigma_ * H;
}
void Isotropic::WhitenInPlace(Matrix& H) const {
H *= invsigma_;
}
// faster version
Vector Isotropic::sample() const {
typedef boost::normal_distribution<double> Normal;
Normal dist(0.0, this->sigma_);
boost::variate_generator<boost::minstd_rand&, Normal> norm(generator, dist);
Vector result(dim_);
for (int i = 0; i < dim_; i++)
result(i) = norm();
return result;
}
/* ************************************************************************* */
void Unit::print(const std::string& name) const {
cout << "Unit (" << dim_ << ") " << name << endl;
}
/* ************************************************************************* */
}
} // gtsam

View File

@ -15,222 +15,216 @@
namespace gtsam {
class SharedDiagonal; // forward declare, defined at end
class SharedDiagonal; // forward declare, defined at end
namespace noiseModel {
namespace noiseModel {
/**
* noiseModel::Base is the abstract base class for all noise models.
*
* Noise models must implement a 'whiten' function to normalize an error vector,
* and an 'unwhiten' function to unnormalize an error vector.
*/
class Base : public Testable<Base> {
/**
* noiseModel::Base is the abstract base class for all noise models.
*
* Noise models must implement a 'whiten' function to normalize an error vector,
* and an 'unwhiten' function to unnormalize an error vector.
*/
class Base : public Testable<Base> {
protected:
protected:
size_t dim_;
size_t dim_;
public:
public:
Base(size_t dim):dim_(dim) {}
virtual ~Base() {}
Base(size_t dim):dim_(dim) {}
virtual ~Base() {}
/**
* Dimensionality
*/
inline size_t dim() const { return dim_;}
/**
* Whiten an error vector.
*/
virtual Vector whiten(const Vector& v) const = 0;
/**
* Unwhiten an error vector.
*/
virtual Vector unwhiten(const Vector& v) const = 0;
/** in-place whiten, override if can be done more efficiently */
virtual void whitenInPlace(Vector& v) const {
v = whiten(v);
}
/** in-place unwhiten, override if can be done more efficiently */
virtual void unwhitenInPlace(Vector& v) const {
v = unwhiten(v);
}
};
/**
* Gaussian implements the mathematical model
* |R*x|^2 = |y|^2 with R'*R=inv(Sigma)
* where
* y = whiten(x) = R*x
* x = unwhiten(x) = inv(R)*y
* as indeed
* |y|^2 = y'*y = x'*R'*R*x
* Various derived classes are available that are more efficient.
/**
* Dimensionality
*/
struct Gaussian: public Base {
inline size_t dim() const { return dim_;}
private:
/**
* Whiten an error vector.
*/
virtual Vector whiten(const Vector& v) const = 0;
// TODO: store as boost upper-triangular or whatever is passed from above
/* Matrix square root of information matrix (R) */
boost::optional<Matrix> sqrt_information_;
/**
* Unwhiten an error vector.
*/
virtual Vector unwhiten(const Vector& v) const = 0;
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
const Matrix& thisR() const {
// should never happen
if (!sqrt_information_) throw std::runtime_error("Gaussian: has no R matrix");
return *sqrt_information_;
}
/** in-place whiten, override if can be done more efficiently */
virtual void whitenInPlace(Vector& v) const {
v = whiten(v);
}
protected:
/** in-place unwhiten, override if can be done more efficiently */
virtual void unwhitenInPlace(Vector& v) const {
v = unwhiten(v);
}
};
/** protected constructor takes square root information matrix */
Gaussian(size_t dim, const boost::optional<Matrix>& sqrt_information = boost::none) :
Base(dim), sqrt_information_(sqrt_information) {
}
/**
* Gaussian implements the mathematical model
* |R*x|^2 = |y|^2 with R'*R=inv(Sigma)
* where
* y = whiten(x) = R*x
* x = unwhiten(x) = inv(R)*y
* as indeed
* |y|^2 = y'*y = x'*R'*R*x
* Various derived classes are available that are more efficient.
*/
struct Gaussian: public Base {
public:
private:
typedef boost::shared_ptr<Gaussian> shared_ptr;
// TODO: store as boost upper-triangular or whatever is passed from above
/* Matrix square root of information matrix (R) */
boost::optional<Matrix> sqrt_information_;
/**
* A Gaussian noise model created by specifying a square root information matrix.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr SqrtInformation(const Matrix& R) {
return shared_ptr(new Gaussian(R.size1(),R));
}
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
const Matrix& thisR() const {
// should never happen
if (!sqrt_information_) throw std::runtime_error("Gaussian: has no R matrix");
return *sqrt_information_;
}
/**
* A Gaussian noise model created by specifying a covariance matrix.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr Covariance(const Matrix& covariance, bool smart=false);
protected:
/**
* A Gaussian noise model created by specifying an information matrix.
*/
static shared_ptr Information(const Matrix& Q) {
return shared_ptr(new Gaussian(Q.size1(),square_root_positive(Q)));
}
/** protected constructor takes square root information matrix */
Gaussian(size_t dim, const boost::optional<Matrix>& sqrt_information = boost::none) :
Base(dim), sqrt_information_(sqrt_information) {
}
virtual void print(const std::string& name) const;
virtual bool equals(const Base& expected, double tol) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
public:
/**
* Mahalanobis distance v'*R'*R*v = <R*v,R*v>
*/
virtual double Mahalanobis(const Vector& v) const;
typedef boost::shared_ptr<Gaussian> shared_ptr;
/**
* Multiply a derivative with R (derivative of whiten)
* Equivalent to whitening each column of the input matrix.
*/
virtual Matrix Whiten(const Matrix& H) const;
/**
* A Gaussian noise model created by specifying a square root information matrix.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr SqrtInformation(const Matrix& R) {
return shared_ptr(new Gaussian(R.size1(),R));
}
/**
* In-place version
*/
virtual void WhitenInPlace(Matrix& H) const;
/**
* A Gaussian noise model created by specifying a covariance matrix.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr Covariance(const Matrix& covariance, bool smart=false);
/**
* Whiten a system, in place as well
*/
inline void WhitenSystem(Matrix& A, Vector& b) const {
WhitenInPlace(A);
whitenInPlace(b);
}
/**
* A Gaussian noise model created by specifying an information matrix.
*/
static shared_ptr Information(const Matrix& Q) {
return shared_ptr(new Gaussian(Q.size1(),square_root_positive(Q)));
}
/**
* Apply appropriately weighted QR factorization to the system [A b]
* Q' * [A b] = [R d]
* Dimensions: (r*m) * m*(n+1) = r*(n+1)
* @param Ab is the m*(n+1) augmented system matrix [A b]
* @return in-place QR factorization [R d]. Below-diagonal is undefined !!!!!
*/
virtual SharedDiagonal QR(Matrix& Ab) const;
virtual void print(const std::string& name) const;
virtual bool equals(const Base& expected, double tol) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
virtual Matrix R() const { return thisR();}
/**
* Mahalanobis distance v'*R'*R*v = <R*v,R*v>
*/
virtual double Mahalanobis(const Vector& v) const;
}; // Gaussian
/**
* Multiply a derivative with R (derivative of whiten)
* Equivalent to whitening each column of the input matrix.
*/
virtual Matrix Whiten(const Matrix& H) const;
// FD: does not work, ambiguous overload :-(
// inline Vector operator*(const Gaussian& R, const Vector& v) {return R.whiten(v);}
/**
* In-place version
*/
virtual void WhitenInPlace(Matrix& H) const;
/**
* A diagonal noise model implements a diagonal covariance matrix, with the
* elements of the diagonal specified in a Vector. This class has no public
* constructors, instead, use the static constructor functions Sigmas etc...
*/
class Diagonal : public Gaussian {
protected:
/**
* Whiten a system, in place as well
*/
inline void WhitenSystem(Matrix& A, Vector& b) const {
WhitenInPlace(A);
whitenInPlace(b);
}
/** sigmas and reciprocal */
Vector sigmas_, invsigmas_;
/**
* Apply appropriately weighted QR factorization to the system [A b]
* Q' * [A b] = [R d]
* Dimensions: (r*m) * m*(n+1) = r*(n+1)
* @param Ab is the m*(n+1) augmented system matrix [A b]
* @return in-place QR factorization [R d]. Below-diagonal is undefined !!!!!
*/
virtual SharedDiagonal QR(Matrix& Ab) const;
/** protected constructor takes sigmas */
Diagonal(const Vector& sigmas);
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
*/
virtual Matrix R() const { return thisR();}
public:
}; // Gaussian
// FD: does not work, ambiguous overload :-(
// inline Vector operator*(const Gaussian& R, const Vector& v) {return R.whiten(v);}
/**
* A diagonal noise model implements a diagonal covariance matrix, with the
* elements of the diagonal specified in a Vector. This class has no public
* constructors, instead, use the static constructor functions Sigmas etc...
*/
class Diagonal : public Gaussian {
protected:
/** sigmas and reciprocal */
Vector sigmas_, invsigmas_;
/** protected constructor takes sigmas */
Diagonal(const Vector& sigmas);
public:
typedef boost::shared_ptr<Diagonal> shared_ptr;
/**
* A diagonal noise model created by specifying a Vector of sigmas, i.e.
* standard devations, the diagonal of the square root covariance matrix.
*/
static shared_ptr Sigmas(const Vector& sigmas) {
return shared_ptr(new Diagonal(sigmas));
}
/**
* A diagonal noise model created by specifying a Vector of sigmas, i.e.
* standard devations, the diagonal of the square root covariance matrix.
*/
static shared_ptr Sigmas(const Vector& sigmas, bool smart=false);
/**
* A diagonal noise model created by specifying a Vector of variances, i.e.
* i.e. the diagonal of the covariance matrix.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr Variances(const Vector& variances, bool smart = false);
/**
* A diagonal noise model created by specifying a Vector of variances, i.e.
* i.e. the diagonal of the covariance matrix.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr Variances(const Vector& variances, bool smart = false);
/**
* A diagonal noise model created by specifying a Vector of precisions, i.e.
* i.e. the diagonal of the information matrix, i.e., weights
*/
static shared_ptr Precisions(const Vector& precisions) {
return Variances(reciprocal(precisions));
}
/**
* A diagonal noise model created by specifying a Vector of precisions, i.e.
* i.e. the diagonal of the information matrix, i.e., weights
*/
static shared_ptr Precisions(const Vector& precisions) {
return Variances(reciprocal(precisions));
}
virtual void print(const std::string& name) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
/**
* Apply QR factorization to the system [A b], taking into account constraints
*/
virtual SharedDiagonal QR(Matrix& Ab) const;
/**
* Return standard deviations (sqrt of diagonal)
*/
inline const Vector& sigmas() const { return sigmas_; }
inline double sigma(size_t i) const { return sigmas_(i); }
* Return standard deviations (sqrt of diagonal)
*/
inline const Vector& sigmas() const { return sigmas_; }
inline double sigma(size_t i) const { return sigmas_(i); }
/**
* generate random variate
*/
virtual Vector sample() const;
/**
* generate random variate
*/
virtual Vector sample() const;
/**
* Return R itself, but note that Whiten(H) is cheaper than R*H
@ -238,155 +232,171 @@ namespace gtsam {
virtual Matrix R() const {
return diag(invsigmas_);
}
}; // Diagonal
/**
* Simple check for constrained-ness
*/
virtual bool isConstrained() {return false;}
/**
* A Constrained constrained model is a specialization of Diagonal which allows
* some or all of the sigmas to be zero, forcing the error to be zero there.
* All other Gaussian models are guaranteed to have a non-singular square-root
* information matrix, but this class is specifically equipped to deal with
* singular noise models, specifically: whiten will return zero on those
* components that have zero sigma *and* zero error, infinity otherwise.
*/
class Constrained : public Diagonal {
protected:
}; // Diagonal
// Constrained does not have member variables
// Instead (possibly zero) sigmas are stored in Diagonal Base class
/**
* A Constrained constrained model is a specialization of Diagonal which allows
* some or all of the sigmas to be zero, forcing the error to be zero there.
* All other Gaussian models are guaranteed to have a non-singular square-root
* information matrix, but this class is specifically equipped to deal with
* singular noise models, specifically: whiten will return zero on those
* components that have zero sigma *and* zero error, infinity otherwise.
*/
class Constrained : public Diagonal {
protected:
/** protected constructor takes sigmas */
Constrained(const Vector& sigmas) :Diagonal(sigmas) {}
// Constrained does not have member variables
// Instead (possibly zero) sigmas are stored in Diagonal Base class
public:
/** protected constructor takes sigmas */
Constrained(const Vector& sigmas) :Diagonal(sigmas) {}
typedef boost::shared_ptr<Constrained> shared_ptr;
public:
/**
* A diagonal noise model created by specifying a Vector of
* standard devations, some of which might be zero
*/
static shared_ptr MixedSigmas(const Vector& sigmas) {
return shared_ptr(new Constrained(sigmas));
}
typedef boost::shared_ptr<Constrained> shared_ptr;
/**
* A diagonal noise model created by specifying a Vector of
* standard devations, some of which might be zero
*/
static shared_ptr MixedVariances(const Vector& variances) {
return shared_ptr(new Constrained(esqrt(variances)));
}
/**
* A diagonal noise model created by specifying a Vector of
* standard devations, some of which might be zero
* TODO: make smart - check for zeros
*/
static shared_ptr MixedSigmas(const Vector& sigmas, bool smart = false) {
return shared_ptr(new Constrained(sigmas));
}
/**
* A diagonal noise model created by specifying a Vector of
* precisions, some of which might be inf
*/
static shared_ptr MixedPrecisions(const Vector& precisions) {
return MixedVariances(reciprocal(precisions));
}
/**
* A diagonal noise model created by specifying a Vector of
* standard devations, some of which might be zero
*/
static shared_ptr MixedVariances(const Vector& variances) {
return shared_ptr(new Constrained(esqrt(variances)));
}
/**
* Fully constrained. TODO: subclass ?
*/
static shared_ptr All(size_t dim) {
return MixedSigmas(repeat(dim,0));
}
/**
* A diagonal noise model created by specifying a Vector of
* precisions, some of which might be inf
*/
static shared_ptr MixedPrecisions(const Vector& precisions) {
return MixedVariances(reciprocal(precisions));
}
virtual void print(const std::string& name) const;
virtual Vector whiten(const Vector& v) const;
/**
* Fully constrained. TODO: subclass ?
*/
static shared_ptr All(size_t dim) {
return MixedSigmas(repeat(dim,0));
}
// Whitening Jacobians does not make sense for possibly constrained
// noise model and will throw an exception.
virtual void print(const std::string& name) const;
virtual Vector whiten(const Vector& v) const;
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
// Whitening Jacobians does not make sense for possibly constrained
// noise model and will throw an exception.
}; // Constrained
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
/**
* An isotropic noise model corresponds to a scaled diagonal covariance
* To construct, use one of the static methods
*/
class Isotropic : public Diagonal {
protected:
double sigma_, invsigma_;
/**
* Apply QR factorization to the system [A b], taking into account constraints
*/
virtual SharedDiagonal QR(Matrix& Ab) const;
/** protected constructor takes sigma */
Isotropic(size_t dim, double sigma) :
Diagonal(repeat(dim, sigma)),sigma_(sigma),invsigma_(1.0/sigma) {}
/**
* Not constrained
*/
virtual bool isConstrained() {return true;}
public:
}; // Constrained
typedef boost::shared_ptr<Isotropic> shared_ptr;
/**
* An isotropic noise model corresponds to a scaled diagonal covariance
* To construct, use one of the static methods
*/
class Isotropic : public Diagonal {
protected:
double sigma_, invsigma_;
/**
* An isotropic noise model created by specifying a standard devation sigma
*/
static shared_ptr Sigma(size_t dim, double sigma) {
return shared_ptr(new Isotropic(dim, sigma));
}
/** protected constructor takes sigma */
Isotropic(size_t dim, double sigma) :
Diagonal(repeat(dim, sigma)),sigma_(sigma),invsigma_(1.0/sigma) {}
/**
* An isotropic noise model created by specifying a variance = sigma^2.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr Variance(size_t dim, double variance, bool smart = false);
public:
/**
* An isotropic noise model created by specifying a precision
*/
static shared_ptr Precision(size_t dim, double precision) {
return Variance(dim, 1.0/precision);
}
typedef boost::shared_ptr<Isotropic> shared_ptr;
virtual void print(const std::string& name) const;
virtual double Mahalanobis(const Vector& v) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
/**
* An isotropic noise model created by specifying a standard devation sigma
*/
static shared_ptr Sigma(size_t dim, double sigma) {
return shared_ptr(new Isotropic(dim, sigma));
}
/**
* Return standard deviation
*/
inline double sigma() const { return sigma_; }
/**
* An isotropic noise model created by specifying a variance = sigma^2.
* @param smart, check if can be simplified to derived class
*/
static shared_ptr Variance(size_t dim, double variance, bool smart = false);
/**
* generate random variate
*/
virtual Vector sample() const;
/**
* An isotropic noise model created by specifying a precision
*/
static shared_ptr Precision(size_t dim, double precision) {
return Variance(dim, 1.0/precision);
}
};
virtual void print(const std::string& name) const;
virtual double Mahalanobis(const Vector& v) const;
virtual Vector whiten(const Vector& v) const;
virtual Vector unwhiten(const Vector& v) const;
virtual Matrix Whiten(const Matrix& H) const;
virtual void WhitenInPlace(Matrix& H) const;
/**
* Unit: i.i.d. unit-variance noise on all m dimensions.
*/
class Unit : public Isotropic {
protected:
/**
* Return standard deviation
*/
inline double sigma() const { return sigma_; }
Unit(size_t dim): Isotropic(dim,1.0) {}
/**
* generate random variate
*/
virtual Vector sample() const;
public:
};
typedef boost::shared_ptr<Unit> shared_ptr;
/**
* Unit: i.i.d. unit-variance noise on all m dimensions.
*/
class Unit : public Isotropic {
protected:
/**
* Create a unit covariance noise model
*/
static shared_ptr Create(size_t dim) {
return shared_ptr(new Unit(dim));
}
Unit(size_t dim): Isotropic(dim,1.0) {}
virtual void print(const std::string& name) const;
virtual double Mahalanobis(const Vector& v) const {return inner_prod(v,v); }
virtual Vector whiten(const Vector& v) const { return v; }
virtual Vector unwhiten(const Vector& v) const { return v; }
virtual Matrix Whiten(const Matrix& H) const { return H; }
virtual void WhitenInPlace(Matrix& H) const {}
};
public:
} // namespace noiseModel
typedef boost::shared_ptr<Unit> shared_ptr;
/**
* Create a unit covariance noise model
*/
static shared_ptr Create(size_t dim) {
return shared_ptr(new Unit(dim));
}
virtual void print(const std::string& name) const;
virtual double Mahalanobis(const Vector& v) const {return inner_prod(v,v); }
virtual Vector whiten(const Vector& v) const { return v; }
virtual Vector unwhiten(const Vector& v) const { return v; }
virtual Matrix Whiten(const Matrix& H) const { return H; }
virtual void WhitenInPlace(Matrix& H) const {}
};
} // namespace noiseModel
using namespace noiseModel;
} // namespace gtsam

View File

@ -85,6 +85,11 @@ namespace gtsam {
return keys_;
}
/** access to the noise model */
SharedGaussian get_noiseModel() const {
return noiseModel_;
}
/** get the size of the factor */
std::size_t size() const {
return keys_.size();

View File

@ -126,19 +126,28 @@ namespace example {
// linearized prior on x1: c["x1"]+x1=0 i.e. x1=-c["x1"]
Vector b1 = -Vector_(2,0.1, 0.1);
fg.add("x1", I, b1, sigma0_1);
//fg.add("x1", I, b1, sigma0_1);
fg.add("x1", 10*eye(2), -1.0*ones(2), noiseModel::Unit::Create(2));
// odometry between x1 and x2: x2-x1=[0.2;-0.1]
Vector b2 = Vector_(2, 0.2, -0.1);
fg.add("x1", -I, "x2", I, b2, sigma0_1);
//fg.add("x1", -I, "x2", I, b2, sigma0_1);
fg.add("x1", -10*eye(2),"x2", 10*eye(2), Vector_(2, 2.0, -1.0),
noiseModel::Unit::Create(2));
// measurement between x1 and l1: l1-x1=[0.0;0.2]
Vector b3 = Vector_(2, 0.0, 0.2);
fg.add("x1", -I, "l1", I, b3, sigma0_2);
//fg.add("x1", -I, "l1", I, b3, sigma0_2);
fg.add("x1", -5*eye(2),
"l1", 5*eye(2), Vector_(2, 0.0, 1.0),
noiseModel::Unit::Create(2));
// measurement between x2 and l1: l1-x2=[-0.2;0.3]
Vector b4 = Vector_(2, -0.2, 0.3);
fg.add("x2", -I, "l1", I, b4, sigma0_2);
//fg.add("x2", -I, "l1", I, b4, sigma0_2);
fg.add("x2", -5*eye(2),
"l1", 5*eye(2), Vector_(2, -1.0, 1.5),
noiseModel::Unit::Create(2));
return fg;
}
@ -306,6 +315,7 @@ namespace example {
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
//GaussianFactor::shared_ptr f1(new GaussianFactor("x", sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
GaussianFactor::shared_ptr f1(new GaussianFactor("x", Ax, b1, sigma0_1));
// create binary constraint factor

View File

@ -35,8 +35,8 @@ static SharedDiagonal
TEST( GaussianFactor, linearFactor )
{
Matrix I = eye(2);
Vector b = Vector_(2,0.2,-0.1);
GaussianFactor expected("x1", -I, "x2", I, b, sigma0_1);
Vector b = Vector_(2, 2.0, -1.0);
GaussianFactor expected("x1", -10*I,"x2", 10*I, b, noiseModel::Unit::Create(2));
// create a small linear factor graph
GaussianFactorGraph fg = createGaussianFactorGraph();
@ -131,37 +131,37 @@ TEST( GaussianFactor, combine )
// the expected combined linear factor
Matrix Ax2 = Matrix_(4, 2, // x2
-1., 0.,
+0., -1.,
1., 0.,
+0., 1.);
-5., 0.,
+0., -5.,
10., 0.,
+0., 10.);
Matrix Al1 = Matrix_(4, 2, // l1
1., 0.,
0., 1.,
5., 0.,
0., 5.,
0., 0.,
0., 0.);
Matrix Ax1 = Matrix_(4, 2, // x1
0.00, 0., // f4
0.00, 0., // f4
-1., 0., // f2
0.00, -1. // f2
-10., 0., // f2
0.00, -10. // f2
);
// the RHS
Vector b2(4);
b2(0) = -0.2;
b2(1) = 0.3;
b2(2) = 0.2;
b2(3) = -0.1;
b2(0) = -1.0;
b2(1) = 1.5;
b2(2) = 2.0;
b2(3) = -1.0;
// use general constructor for making arbitrary factors
vector<pair<Symbol, Matrix> > meas;
meas.push_back(make_pair("x2", Ax2));
meas.push_back(make_pair("l1", Al1));
meas.push_back(make_pair("x1", Ax1));
GaussianFactor expected(meas, b2, sigmas);
GaussianFactor expected(meas, b2, noiseModel::Diagonal::Sigmas(ones(4)));
CHECK(assert_equal(expected,combined));
}
@ -320,23 +320,24 @@ TEST( GaussianFactor, eliminate )
boost::tie(actualCG,actualLF) = combined.eliminate("x2");
// create expected Conditional Gaussian
Matrix I = eye(2);
Matrix I = eye(2)*sqrt(125.0);
Matrix R11 = I, S12 = -0.2*I, S13 = -0.8*I;
Vector d(2); d(0) = 0.2; d(1) = -0.14;
Vector d = I*Vector_(2,0.2,-0.14);
// Check the conditional Gaussian
GaussianConditional
expectedCG("x2", d, R11, "l1", S12, "x1", S13, repeat(2, 1 / sqrt(125.0)));
expectedCG("x2", d, R11, "l1", S12, "x1", S13, repeat(2, 1.0));
// the expected linear factor
I = eye(2)/0.2236;
Matrix Bl1 = I, Bx1 = -I;
Vector b1(2); b1(0) = 0.0; b1(1) = 0.2;
Vector b1 = I*Vector_(2,0.0,0.2);
GaussianFactor expectedLF("l1", Bl1, "x1", Bx1, b1, repeat(2,0.2236));
GaussianFactor expectedLF("l1", Bl1, "x1", Bx1, b1, repeat(2,1.0));
// check if the result matches
CHECK(assert_equal(expectedCG,*actualCG,1e-4));
CHECK(assert_equal(expectedLF,*actualLF,1e-5));
CHECK(assert_equal(expectedCG,*actualCG,1e-3));
CHECK(assert_equal(expectedLF,*actualLF,1e-3));
}
@ -383,21 +384,18 @@ TEST( GaussianFactor, eliminate2 )
boost::tie(actualCG,actualLF) = combined.eliminate("x2");
// create expected Conditional Gaussian
double oldSigma = 0.0894427; // from when R was made unit
Matrix R11 = Matrix_(2,2,
1.00, 0.00,
0.00, 1.00
);
)/oldSigma;
Matrix S12 = Matrix_(2,4,
-0.20, 0.00,-0.80, 0.00,
+0.00,-0.20,+0.00,-0.80
);
Vector d(2); d(0) = 0.2; d(1) = -0.14;
Vector x2Sigmas(2);
x2Sigmas(0) = 0.0894427;
x2Sigmas(1) = 0.0894427;
GaussianConditional expectedCG("x2",d,R11,"l11",S12,x2Sigmas);
)/oldSigma;
Vector d = Vector_(2,0.2,-0.14)/oldSigma;
GaussianConditional expectedCG("x2",d,R11,"l11",S12,ones(2));
CHECK(assert_equal(expectedCG,*actualCG,1e-4));
// the expected linear factor
double sigma = 0.2236;
@ -405,16 +403,10 @@ TEST( GaussianFactor, eliminate2 )
// l1 x1
1.00, 0.00, -1.00, 0.00,
0.00, 1.00, +0.00, -1.00
);
// the RHS
Vector b1(2); b1(0) = 0.0; b1(1) = 0.894427;
GaussianFactor expectedLF("l11", Bl1x1, b1*sigma, repeat(2,sigma));
// check if the result matches
CHECK(assert_equal(expectedCG,*actualCG,1e-4));
CHECK(assert_equal(expectedLF,*actualLF,1e-5));
)/sigma;
Vector b1 =Vector_(2,0.0,0.894427);
GaussianFactor expectedLF("l11", Bl1x1, b1, repeat(2,1.0));
CHECK(assert_equal(expectedLF,*actualLF,1e-3));
}
/* ************************************************************************* */
@ -463,7 +455,10 @@ TEST( GaussianFactor, matrix )
GaussianFactorGraph fg = createGaussianFactorGraph();
// get the factor "f2" from the factor graph
GaussianFactor::shared_ptr lf = fg[1];
//GaussianFactor::shared_ptr lf = fg[1]; // NOTE: using the older version
Vector b2 = Vector_(2, 0.2, -0.1);
Matrix I = eye(2);
GaussianFactor::shared_ptr lf(new GaussianFactor("x1", -I, "x2", I, b2, sigma0_1));
// render with a given ordering
Ordering ord;
@ -489,7 +484,7 @@ TEST( GaussianFactor, matrix )
Matrix A2 = Matrix_(2,4,
-1.0, 0.0, 1.0, 0.0,
000.0,-1.0, 0.0, 1.0 );
Vector b2 = Vector_(2, 0.2, -0.1);
//Vector b2 = Vector_(2, 2.0, -1.0);
EQUALITY(A_act2,A2);
EQUALITY(b_act2,b2);
@ -508,7 +503,10 @@ TEST( GaussianFactor, matrix_aug )
GaussianFactorGraph fg = createGaussianFactorGraph();
// get the factor "f2" from the factor graph
GaussianFactor::shared_ptr lf = fg[1];
//GaussianFactor::shared_ptr lf = fg[1];
Vector b2 = Vector_(2, 0.2, -0.1);
Matrix I = eye(2);
GaussianFactor::shared_ptr lf(new GaussianFactor("x1", -I, "x2", I, b2, sigma0_1));
// render with a given ordering
Ordering ord;
@ -654,9 +652,9 @@ TEST( GaussianFactor, alphaFactor )
GaussianFactor::shared_ptr actual = factor->alphaFactor(alphaKey,x,d);
// calculate expected
Matrix A = Matrix_(2,1,30.0,5.0);
Vector b = Vector_(2,-0.1,-0.1);
GaussianFactor expected(alphaKey,A,b,sigma0_1);
Matrix A = Matrix_(2,1,300.0,50.0);
Vector b = Vector_(2,-1.0,-1.0);
GaussianFactor expected(alphaKey,A,b,noiseModel::Unit::Create(2));
CHECK(assert_equal(expected,*actual));
}

View File

@ -28,7 +28,7 @@ using namespace boost::assign;
using namespace gtsam;
using namespace example;
double tol=1e-4;
double tol=1e-5;
/* ************************************************************************* */
/* unit test for equals (GaussianFactorGraph1 == GaussianFactorGraph2) */
@ -77,12 +77,6 @@ TEST( GaussianFactorGraph, combine_factors_x1 )
// create a small example for a linear factor graph
GaussianFactorGraph fg = createGaussianFactorGraph();
// create sigmas
double sigma1 = 0.1;
double sigma2 = 0.1;
double sigma3 = 0.2;
Vector sigmas = Vector_(6, sigma1, sigma1, sigma2, sigma2, sigma3, sigma3);
// combine all factors
GaussianFactor::shared_ptr actual = removeAndCombineFactors(fg,"x1");
@ -92,42 +86,42 @@ TEST( GaussianFactorGraph, combine_factors_x1 )
0., 0.,
0., 0.,
0., 0.,
1., 0.,
0., 1.
5., 0.,
0., 5.
);
Matrix Ax1 = Matrix_(6,2,
1., 0.,
0., 1.,
-1., 0.,
0.,-1.,
-1., 0.,
0.,-1.
10., 0.,
0., 10.,
-10., 0.,
0.,-10.,
-5., 0.,
0.,-5.
);
Matrix Ax2 = Matrix_(6,2,
0., 0.,
0., 0.,
1., 0.,
0., 1.,
10., 0.,
0., 10.,
0., 0.,
0., 0.
);
// the expected RHS vector
Vector b(6);
b(0) = -1*sigma1;
b(1) = -1*sigma1;
b(2) = 2*sigma2;
b(3) = -1*sigma2;
b(4) = 0*sigma3;
b(5) = 1*sigma3;
b(0) = -1;
b(1) = -1;
b(2) = 2;
b(3) = -1;
b(4) = 0;
b(5) = 1;
vector<pair<Symbol, Matrix> > meas;
meas.push_back(make_pair("l1", Al1));
meas.push_back(make_pair("x1", Ax1));
meas.push_back(make_pair("x2", Ax2));
GaussianFactor expected(meas, b, sigmas);
GaussianFactor expected(meas, b, ones(6));
//GaussianFactor expected("l1", Al1, "x1", Ax1, "x2", Ax2, b);
// check if the two factors are the same
@ -140,11 +134,6 @@ TEST( GaussianFactorGraph, combine_factors_x2 )
// create a small example for a linear factor graph
GaussianFactorGraph fg = createGaussianFactorGraph();
// determine sigmas
double sigma1 = 0.1;
double sigma2 = 0.2;
Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
// combine all factors
GaussianFactor::shared_ptr actual = removeAndCombineFactors(fg,"x2");
@ -153,38 +142,38 @@ TEST( GaussianFactorGraph, combine_factors_x2 )
// l1
0., 0.,
0., 0.,
1., 0.,
0., 1.
5., 0.,
0., 5.
);
Matrix Ax1 = Matrix_(4,2,
// x1
-1., 0., // f2
0.,-1., // f2
-10., 0., // f2
0.,-10., // f2
0., 0., // f4
0., 0. // f4
);
Matrix Ax2 = Matrix_(4,2,
// x2
1., 0.,
0., 1.,
-1., 0.,
0.,-1.
10., 0.,
0., 10.,
-5., 0.,
0.,-5.
);
// the expected RHS vector
Vector b(4);
b(0) = 2*sigma1;
b(1) = -1*sigma1;
b(2) = -1 *sigma2;
b(3) = 1.5*sigma2;
b(0) = 2;
b(1) = -1;
b(2) = -1;
b(3) = 1.5;
vector<pair<Symbol, Matrix> > meas;
meas.push_back(make_pair("l1", Al1));
meas.push_back(make_pair("x1", Ax1));
meas.push_back(make_pair("x2", Ax2));
GaussianFactor expected(meas, b, sigmas);
GaussianFactor expected(meas, b, ones(4));
// check if the two factors are the same
CHECK(assert_equal(expected,*actual));
@ -197,9 +186,9 @@ TEST( GaussianFactorGraph, eliminateOne_x1 )
GaussianConditional::shared_ptr actual = fg.eliminateOne("x1");
// create expected Conditional Gaussian
Matrix I = eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
Vector d = Vector_(2, -0.133333, -0.0222222), sigma = repeat(2, 1./15);
GaussianConditional expected("x1",d,R11,"l1",S12,"x2",S13,sigma);
Matrix I = 15*eye(2), R11 = I, S12 = -0.111111*I, S13 = -0.444444*I;
Vector d = Vector_(2, -0.133333, -0.0222222), sigma = ones(2);
GaussianConditional expected("x1",15*d,R11,"l1",S12,"x2",S13,sigma);
CHECK(assert_equal(expected,*actual,tol));
}
@ -211,8 +200,9 @@ TEST( GaussianFactorGraph, eliminateOne_x2 )
GaussianConditional::shared_ptr actual = fg.eliminateOne("x2");
// create expected Conditional Gaussian
Matrix I = eye(2), R11 = I, S12 = -0.2*I, S13 = -0.8*I;
Vector d = Vector_(2, 0.2, -0.14), sigma = repeat(2, 0.0894427);
double sig = 0.0894427;
Matrix I = eye(2)/sig, R11 = I, S12 = -0.2*I, S13 = -0.8*I;
Vector d = Vector_(2, 0.2, -0.14)/sig, sigma = ones(2);
GaussianConditional expected("x2",d,R11,"l1",S12,"x1",S13,sigma);
CHECK(assert_equal(expected,*actual,tol));
@ -225,8 +215,9 @@ TEST( GaussianFactorGraph, eliminateOne_l1 )
GaussianConditional::shared_ptr actual = fg.eliminateOne("l1");
// create expected Conditional Gaussian
Matrix I = eye(2), R11 = I, S12 = -0.5*I, S13 = -0.5*I;
Vector d = Vector_(2, -0.1, 0.25), sigma = repeat(2, 0.141421);
double sig = sqrt(2)/10.;
Matrix I = eye(2)/sig, R11 = I, S12 = -0.5*I, S13 = -0.5*I;
Vector d = Vector_(2, -0.1, 0.25)/sig, sigma = ones(2);
GaussianConditional expected("l1",d,R11,"x1",S12,"x2",S13,sigma);
CHECK(assert_equal(expected,*actual,tol));
@ -235,24 +226,26 @@ TEST( GaussianFactorGraph, eliminateOne_l1 )
/* ************************************************************************* */
TEST( GaussianFactorGraph, eliminateAll )
{
// create expected Chordal bayes Net
Matrix I = eye(2);
// create expected Chordal bayes Net
Matrix I = eye(2);
Vector d1 = Vector_(2, -0.1,-0.1);
GaussianBayesNet expected = simpleGaussian("x1",d1,0.1);
Vector d1 = Vector_(2, -0.1,-0.1);
GaussianBayesNet expected = simpleGaussian("x1",d1,0.1);
Vector d2 = Vector_(2, 0.0, 0.2), sigma2 = repeat(2,0.149071);
push_front(expected,"l1",d2, I,"x1", (-1)*I,sigma2);
double sig1 = 0.149071;
Vector d2 = Vector_(2, 0.0, 0.2)/sig1, sigma2 = ones(2);
push_front(expected,"l1",d2, I/sig1,"x1", (-1)*I/sig1,sigma2);
Vector d3 = Vector_(2, 0.2, -0.14), sigma3 = repeat(2,0.0894427);
push_front(expected,"x2",d3, I,"l1", (-0.2)*I, "x1", (-0.8)*I, sigma3);
double sig2 = 0.0894427;
Vector d3 = Vector_(2, 0.2, -0.14)/sig2, sigma3 = ones(2);
push_front(expected,"x2",d3, I/sig2,"l1", (-0.2)*I/sig2, "x1", (-0.8)*I/sig2, sigma3);
// Check one ordering
GaussianFactorGraph fg1 = createGaussianFactorGraph();
Ordering ordering;
ordering += "x2","l1","x1";
GaussianBayesNet actual = fg1.eliminate(ordering);
CHECK(assert_equal(expected,actual,tol));
// Check one ordering
GaussianFactorGraph fg1 = createGaussianFactorGraph();
Ordering ordering;
ordering += "x2","l1","x1";
GaussianBayesNet actual = fg1.eliminate(ordering);
CHECK(assert_equal(expected,actual,tol));
}
/* ************************************************************************* */
@ -267,7 +260,7 @@ TEST( GaussianFactorGraph, add_priors )
expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor("l1",A,b,sigma)));
expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor("x1",A,b,sigma)));
expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor("x2",A,b,sigma)));
CHECK(assert_equal(expected,actual)); // Fails
CHECK(assert_equal(expected,actual));
}
/* ************************************************************************* */
@ -640,14 +633,14 @@ TEST( GaussianFactorGraph, elimination )
GaussianBayesNet bayesNet = fg.eliminate(ord);
// Check sigma
DOUBLES_EQUAL(1.0/0.612372,bayesNet["x2"]->get_sigmas()(0),1e-5);
DOUBLES_EQUAL(1.0,bayesNet["x2"]->get_sigmas()(0),1e-5);
// Check matrix
Matrix R;Vector d;
boost::tie(R,d) = matrix(bayesNet);
Matrix expected = Matrix_(2,2,
0.707107, -0.353553,
0.0, 0.612372);
0.0, 0.612372);
CHECK(assert_equal(expected,R,1e-6));
}

View File

@ -25,9 +25,11 @@ using namespace example;
/* ************************************************************************* */
// Some numbers that should be consistent among all smoother tests
double sigmax1 = 0.786153, sigmax2 = 0.687131, sigmax3 = 0.671512, sigmax4 =
double sigmax1 = 0.786153, sigmax2 = 1.0/1.47292, sigmax3 = 0.671512, sigmax4 =
0.669534, sigmax5 = sigmax3, sigmax6 = sigmax2, sigmax7 = sigmax1;
const double tol = 1e-4;
/* ************************************************************************* */
TEST( ISAM, iSAM_smoother )
{
@ -112,30 +114,30 @@ TEST( BayesTree, linear_smoother_shortcuts )
GaussianBayesNet empty;
GaussianISAM::sharedClique R = bayesTree.root();
GaussianBayesNet actual1 = R->shortcut<GaussianFactor>(R);
CHECK(assert_equal(empty,actual1,1e-4));
CHECK(assert_equal(empty,actual1,tol));
// Check the conditional P(C2|Root)
GaussianISAM::sharedClique C2 = bayesTree["x5"];
GaussianBayesNet actual2 = C2->shortcut<GaussianFactor>(R);
CHECK(assert_equal(empty,actual2,1e-4));
CHECK(assert_equal(empty,actual2,tol));
// Check the conditional P(C3|Root)
Vector sigma3 = repeat(2, 0.61808);
Matrix A56 = Matrix_(2,2,-0.382022,0.,0.,-0.382022);
double sigma3 = 0.61808;
Matrix A56 = Matrix_(2,2,-0.382022,0.,0.,-0.382022);
GaussianBayesNet expected3;
push_front(expected3,"x5", zero(2), eye(2), "x6", A56, sigma3);
push_front(expected3,"x5", zero(2), eye(2)/sigma3, "x6", A56/sigma3, ones(2));
GaussianISAM::sharedClique C3 = bayesTree["x4"];
GaussianBayesNet actual3 = C3->shortcut<GaussianFactor>(R);
CHECK(assert_equal(expected3,actual3,1e-4));
CHECK(assert_equal(expected3,actual3,tol));
// Check the conditional P(C4|Root)
Vector sigma4 = repeat(2, 0.661968);
Matrix A46 = Matrix_(2,2,-0.146067,0.,0.,-0.146067);
GaussianBayesNet expected4;
push_front(expected4,"x4", zero(2), eye(2), "x6", A46, sigma4);
double sigma4 = 0.661968;
Matrix A46 = Matrix_(2,2,-0.146067,0.,0.,-0.146067);
GaussianBayesNet expected4;
push_front(expected4,"x4", zero(2), eye(2)/sigma4, "x6", A46/sigma4, ones(2));
GaussianISAM::sharedClique C4 = bayesTree["x3"];
GaussianBayesNet actual4 = C4->shortcut<GaussianFactor>(R);
CHECK(assert_equal(expected4,actual4,1e-4));
CHECK(assert_equal(expected4,actual4,tol));
}
/* ************************************************************************* *
@ -167,40 +169,43 @@ TEST( BayesTree, balanced_smoother_marginals )
// eliminate using a "nested dissection" ordering
GaussianBayesNet chordalBayesNet = smoother.eliminate(ordering);
VectorConfig expectedSolution;
BOOST_FOREACH(string key, ordering)
expectedSolution.insert(key,zero(2));
VectorConfig actualSolution = optimize(chordalBayesNet);
CHECK(assert_equal(expectedSolution,actualSolution,1e-4));
VectorConfig expectedSolution;
BOOST_FOREACH(string key, ordering)
expectedSolution.insert(key,zero(2));
VectorConfig actualSolution = optimize(chordalBayesNet);
CHECK(assert_equal(expectedSolution,actualSolution,tol));
// Create the Bayes tree
GaussianISAM bayesTree(chordalBayesNet);
LONGS_EQUAL(4,bayesTree.size());
double tol=1e-5;
// Check marginal on x1
GaussianBayesNet expected1 = simpleGaussian("x1", zero(2), sigmax1);
GaussianBayesNet actual1 = bayesTree.marginalBayesNet<GaussianFactor>("x1");
CHECK(assert_equal(expected1,actual1,1e-4));
CHECK(assert_equal(expected1,actual1,tol));
// Check marginal on x2
GaussianBayesNet expected2 = simpleGaussian("x2", zero(2), sigmax2);
double sigx2 = 0.68712938; // FIXME: this should be corrected analytically
GaussianBayesNet expected2 = simpleGaussian("x2", zero(2), sigx2);
GaussianBayesNet actual2 = bayesTree.marginalBayesNet<GaussianFactor>("x2");
CHECK(assert_equal(expected2,actual2,1e-4));
CHECK(assert_equal(expected2,actual2,tol)); // FAILS
// Check marginal on x3
GaussianBayesNet expected3 = simpleGaussian("x3", zero(2), sigmax3);
GaussianBayesNet expected3 = simpleGaussian("x3", zero(2), sigmax3);
GaussianBayesNet actual3 = bayesTree.marginalBayesNet<GaussianFactor>("x3");
CHECK(assert_equal(expected3,actual3,1e-4));
CHECK(assert_equal(expected3,actual3,tol));
// Check marginal on x4
GaussianBayesNet expected4 = simpleGaussian("x4", zero(2), sigmax4);
GaussianBayesNet expected4 = simpleGaussian("x4", zero(2), sigmax4);
GaussianBayesNet actual4 = bayesTree.marginalBayesNet<GaussianFactor>("x4");
CHECK(assert_equal(expected4,actual4,1e-4));
CHECK(assert_equal(expected4,actual4,tol));
// Check marginal on x7 (should be equal to x1)
GaussianBayesNet expected7 = simpleGaussian("x7", zero(2), sigmax7);
GaussianBayesNet expected7 = simpleGaussian("x7", zero(2), sigmax7);
GaussianBayesNet actual7 = bayesTree.marginalBayesNet<GaussianFactor>("x7");
CHECK(assert_equal(expected7,actual7,1e-4));
CHECK(assert_equal(expected7,actual7,tol));
}
/* ************************************************************************* */
@ -219,19 +224,19 @@ TEST( BayesTree, balanced_smoother_shortcuts )
GaussianBayesNet empty;
GaussianISAM::sharedClique R = bayesTree.root();
GaussianBayesNet actual1 = R->shortcut<GaussianFactor>(R);
CHECK(assert_equal(empty,actual1,1e-4));
CHECK(assert_equal(empty,actual1,tol));
// Check the conditional P(C2|Root)
GaussianISAM::sharedClique C2 = bayesTree["x3"];
GaussianBayesNet actual2 = C2->shortcut<GaussianFactor>(R);
CHECK(assert_equal(empty,actual2,1e-4));
CHECK(assert_equal(empty,actual2,tol));
// Check the conditional P(C3|Root), which should be equal to P(x2|x4)
GaussianConditional::shared_ptr p_x2_x4 = chordalBayesNet["x2"];
GaussianBayesNet expected3; expected3.push_back(p_x2_x4);
GaussianISAM::sharedClique C3 = bayesTree["x1"];
GaussianBayesNet actual3 = C3->shortcut<GaussianFactor>(R);
CHECK(assert_equal(expected3,actual3,1e-4));
CHECK(assert_equal(expected3,actual3,tol));
}
/* ************************************************************************* */
@ -247,14 +252,13 @@ TEST( BayesTree, balanced_smoother_clique_marginals )
GaussianISAM bayesTree(chordalBayesNet);
// Check the clique marginal P(C3)
GaussianBayesNet expected = simpleGaussian("x2",zero(2),sigmax2);
Vector sigma = repeat(2, 0.707107);
Matrix A12 = (-0.5)*eye(2);
push_front(expected,"x1", zero(2), eye(2), "x2", A12, sigma);
double sigmax2_alt = 1/1.45533; // THIS NEEDS TO BE CHECKED!
GaussianBayesNet expected = simpleGaussian("x2",zero(2),sigmax2_alt);
push_front(expected,"x1", zero(2), eye(2)*sqrt(2), "x2", -eye(2)*sqrt(2)/2, ones(2));
GaussianISAM::sharedClique R = bayesTree.root(), C3 = bayesTree["x1"];
FactorGraph<GaussianFactor> marginal = C3->marginal<GaussianFactor>(R);
GaussianBayesNet actual = eliminate<GaussianFactor,GaussianConditional>(marginal,C3->keys());
CHECK(assert_equal(expected,actual,1e-4));
CHECK(assert_equal(expected,actual,tol));
}
/* ************************************************************************* */
@ -265,41 +269,58 @@ TEST( BayesTree, balanced_smoother_joint )
Ordering ordering;
ordering += "x1","x3","x5","x7","x2","x6","x4";
// Create the Bayes tree
// Create the Bayes tree, expected to look like:
// x5 x6 x4
// x3 x2 : x4
// x1 : x2
// x7 : x6
GaussianBayesNet chordalBayesNet = smoother.eliminate(ordering);
GaussianISAM bayesTree(chordalBayesNet);
// Conditional density elements reused by both tests
Vector sigma = repeat(2, 0.786146);
Matrix I = eye(2), A = -0.00429185*I;
// Conditional density elements reused by both tests
const Vector sigma = ones(2);
const Matrix I = eye(2), A = -0.00429185*I;
// Check the joint density P(x1,x7) factored as P(x1|x7)P(x7)
GaussianBayesNet expected1 = simpleGaussian("x7", zero(2), sigmax7);
push_front(expected1,"x1", zero(2), I, "x7", A, sigma);
// Check the joint density P(x1,x7) factored as P(x1|x7)P(x7)
GaussianBayesNet expected1;
// Why does the sign get flipped on the prior?
GaussianConditional::shared_ptr
parent1(new GaussianConditional("x7", zero(2), -1*I/sigmax7, ones(2)));
expected1.push_front(parent1);
push_front(expected1,"x1", zero(2), I/sigmax7, "x7", A/sigmax7, sigma);
GaussianBayesNet actual1 = bayesTree.jointBayesNet<GaussianFactor>("x1","x7");
CHECK(assert_equal(expected1,actual1,1e-4));
CHECK(assert_equal(expected1,actual1,tol));
// Check the joint density P(x7,x1) factored as P(x7|x1)P(x1)
GaussianBayesNet expected2 = simpleGaussian("x1", zero(2), sigmax1);
push_front(expected2,"x7", zero(2), I, "x1", A, sigma);
GaussianBayesNet expected2;
GaussianConditional::shared_ptr
parent2(new GaussianConditional("x1", zero(2), -1*I/sigmax1, ones(2)));
expected2.push_front(parent2);
push_front(expected2,"x7", zero(2), I/sigmax1, "x1", A/sigmax1, sigma);
GaussianBayesNet actual2 = bayesTree.jointBayesNet<GaussianFactor>("x7","x1");
CHECK(assert_equal(expected2,actual2,1e-4));
CHECK(assert_equal(expected2,actual2,tol));
// Check the joint density P(x1,x4), i.e. with a root variable
GaussianBayesNet expected3 = simpleGaussian("x4", zero(2), sigmax4);
Vector sigma14 = repeat(2, 0.784465);
Matrix A14 = -0.0769231*I;
push_front(expected3,"x1", zero(2), I, "x4", A14, sigma14);
GaussianBayesNet expected3;
GaussianConditional::shared_ptr
parent3(new GaussianConditional("x4", zero(2), I/sigmax4, ones(2)));
expected3.push_front(parent3);
double sig14 = 0.784465;
Matrix A14 = -0.0769231*I;
push_front(expected3,"x1", zero(2), I/sig14, "x4", A14/sig14, sigma);
GaussianBayesNet actual3 = bayesTree.jointBayesNet<GaussianFactor>("x1","x4");
CHECK(assert_equal(expected3,actual3,1e-4));
CHECK(assert_equal(expected3,actual3,tol));
// Check the joint density P(x4,x1), i.e. with a root variable, factored the other way
GaussianBayesNet expected4 = simpleGaussian("x1", zero(2), sigmax1);
Vector sigma41 = repeat(2, 0.668096);
Matrix A41 = -0.055794*I;
push_front(expected4,"x4", zero(2), I, "x1", A41, sigma41);
GaussianBayesNet expected4;
GaussianConditional::shared_ptr
parent4(new GaussianConditional("x1", zero(2), -1.0*I/sigmax1, ones(2)));
expected4.push_front(parent4);
double sig41 = 0.668096;
Matrix A41 = -0.055794*I;
push_front(expected4,"x4", zero(2), I/sig41, "x1", A41/sig41, sigma);
GaussianBayesNet actual4 = bayesTree.jointBayesNet<GaussianFactor>("x4","x1");
CHECK(assert_equal(expected4,actual4,1e-4));
CHECK(assert_equal(expected4,actual4,tol));
}
/* ************************************************************************* */

View File

@ -22,6 +22,8 @@ using namespace std;
using namespace gtsam;
using namespace example;
const double tol = 1e-4;
/* ************************************************************************* */
TEST( ISAM2, solving )
{
@ -34,10 +36,10 @@ TEST( ISAM2, solving )
GaussianISAM2 btree(nlfg, ordering, noisy);
VectorConfig actualDelta = optimize2(btree);
VectorConfig delta = createCorrectDelta();
CHECK(assert_equal(delta, actualDelta));
CHECK(assert_equal(delta, actualDelta, 0.01));
Config actualSolution = noisy.expmap(actualDelta);
Config solution = createConfig();
CHECK(assert_equal(solution, actualSolution));
CHECK(assert_equal(solution, actualSolution, tol));
}
/* ************************************************************************* */

View File

@ -47,7 +47,7 @@ TEST( Inference, marginals )
BayesNet<GaussianConditional> actual = eliminate<GaussianFactor,GaussianConditional>(fg,keys);
// expected is just scalar Gaussian on x
GaussianBayesNet expected = scalarGaussian("x",4,sqrt(2));
GaussianBayesNet expected = scalarGaussian("x", 4, sqrt(2));
CHECK(assert_equal(expected,actual));
}

View File

@ -10,7 +10,7 @@ using namespace boost::assign;
#include <CppUnitLite/TestHarness.h>
// TODO: DANGEROUS, create shared pointers
#define GTSAM_DANGEROUS_GAUSSIAN 3
#define GTSAM_MAGIC_GAUSSIAN 3
#define GTSAM_MAGIC_KEY
#include "Ordering.h"
@ -114,8 +114,8 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint )
config.insert(2, Pose2(1.5,0.,0.));
Pose2Graph graph;
graph.addPrior(1, Pose2(0.,0.,0.), sharedSigma(3, 1e-10));
graph.addConstraint(1,2, Pose2(1.,0.,0.), sharedSigma(3, 1));
graph.addPrior(1, Pose2(0.,0.,0.), Isotropic::Sigma(3, 1e-10));
graph.addConstraint(1,2, Pose2(1.,0.,0.), Isotropic::Sigma(3, 1));
VectorConfig zeros;
zeros.insert("x1",zero(3));
@ -140,8 +140,8 @@ TEST( Iterative, subgraphPCG )
theta_bar.insert(2, Pose2(1.5,0.,0.));
Pose2Graph graph;
graph.addPrior(1, Pose2(0.,0.,0.), sharedSigma(3, 1e-10));
graph.addConstraint(1,2, Pose2(1.,0.,0.), sharedSigma(3, 1));
graph.addPrior(1, Pose2(0.,0.,0.), Isotropic::Sigma(3, 1e-10));
graph.addConstraint(1,2, Pose2(1.,0.,0.), Isotropic::Sigma(3, 1));
VectorConfig zeros;
zeros.insert("x1",zero(3));

View File

@ -502,32 +502,33 @@ TEST( matrix, backsubtitution )
/* ************************************************************************* */
TEST( matrix, houseHolder )
{
double data[] = {-5, 0, 5, 0, 0, 0, -1,
00, -5, 0, 5, 0, 0, 1.5,
10, 0, 0, 0,-10, 0, 2,
00, 10, 0, 0, 0,-10, -1};
double data[] = {
-5, 0, 5, 0, 0, 0, -1,
00, -5, 0, 5, 0, 0, 1.5,
10, 0, 0, 0,-10, 0, 2,
00, 10, 0, 0, 0,-10, -1};
// check in-place householder, with v vectors below diagonal
double data1[] = {
11.1803, 0, -2.2361, 0, -8.9443, 0, 2.236,
0, 11.1803, 0, -2.2361, 0, -8.9443, -1.565,
-0.618034, 0, 4.4721, 0, -4.4721, 0, 0,
0, -0.618034, 0, 4.4721, 0, -4.4721, 0.894 };
Matrix expected1 = Matrix_(4,7, data1);
Matrix A1 = Matrix_(4, 7, data);
householder_(A1,3);
CHECK(assert_equal(expected1, A1, 1e-3));
// check in-place householder, with v vectors below diagonal
double data1[] = {
11.1803, 0, -2.2361, 0, -8.9443, 0, 2.236,
0, 11.1803, 0, -2.2361, 0, -8.9443, -1.565,
-0.618034, 0, 4.4721, 0, -4.4721, 0, 0,
0, -0.618034, 0, 4.4721, 0, -4.4721, 0.894 };
Matrix expected1 = Matrix_(4,7, data1);
Matrix A1 = Matrix_(4, 7, data);
householder_(A1,3);
CHECK(assert_equal(expected1, A1, 1e-3));
// in-place, with zeros below diagonal
double data2[] = {
11.1803, 0, -2.2361, 0, -8.9443, 0, 2.236,
0, 11.1803, 0, -2.2361, 0, -8.9443, -1.565,
0, 0, 4.4721, 0, -4.4721, 0, 0,
0, 0, 0, 4.4721, 0, -4.4721, 0.894 };
Matrix expected = Matrix_(4,7, data2);
Matrix A2 = Matrix_(4, 7, data);
householder(A2,3);
CHECK(assert_equal(expected, A2, 1e-3));
// in-place, with zeros below diagonal
double data2[] = {
11.1803, 0, -2.2361, 0, -8.9443, 0, 2.236,
0, 11.1803, 0, -2.2361, 0, -8.9443, -1.565,
0, 0, 4.4721, 0, -4.4721, 0, 0,
0, 0, 0, 4.4721, 0, -4.4721, 0.894 };
Matrix expected = Matrix_(4,7, data2);
Matrix A2 = Matrix_(4, 7, data);
householder(A2,3);
CHECK(assert_equal(expected, A2, 1e-3));
}
/* ************************************************************************* */
// unit test for qr factorization (and hence householder)
@ -535,39 +536,39 @@ TEST( matrix, houseHolder )
/* ************************************************************************* */
TEST( matrix, qr )
{
double data[] = {-5, 0, 5, 0,
00, -5, 0, 5,
10, 0, 0, 0,
00, 10, 0, 0,
00, 0, 0,-10,
10, 0,-10, 0};
Matrix A = Matrix_(6, 4, data);
double data[] = {-5, 0, 5, 0,
00, -5, 0, 5,
10, 0, 0, 0,
00, 10, 0, 0,
00, 0, 0,-10,
10, 0,-10, 0};
Matrix A = Matrix_(6, 4, data);
double dataQ[] = {
-0.3333, 0, 0.2981, 0, 0, -0.8944,
0000000, -0.4472, 0, 0.3651, -0.8165, 0,
00.6667, 0, 0.7454, 0, 0, 0,
0000000, 0.8944, 0, 0.1826, -0.4082, 0,
0000000, 0, 0, -0.9129, -0.4082, 0,
00.6667, 0, -0.5963, 0, 0, -0.4472,
};
Matrix expectedQ = Matrix_(6,6, dataQ);
double dataR[] = {
15, 0, -8.3333, 0,
00, 11.1803, 0, -2.2361,
00, 0, 7.4536, 0,
00, 0, 0, 10.9545,
00, 0, 0, 0,
00, 0, 0, 0,
};
Matrix expectedR = Matrix_(6,4, dataR);
double dataQ[] = {
-0.3333, 0, 0.2981, 0, 0, -0.8944,
0000000, -0.4472, 0, 0.3651, -0.8165, 0,
00.6667, 0, 0.7454, 0, 0, 0,
0000000, 0.8944, 0, 0.1826, -0.4082, 0,
0000000, 0, 0, -0.9129, -0.4082, 0,
00.6667, 0, -0.5963, 0, 0, -0.4472,
};
Matrix expectedQ = Matrix_(6,6, dataQ);
Matrix Q,R;
boost::tie(Q,R) = qr(A);
CHECK(assert_equal(expectedQ, Q, 1e-4));
CHECK(assert_equal(expectedR, R, 1e-4));
CHECK(assert_equal(A, Q*R, 1e-14));
double dataR[] = {
15, 0, -8.3333, 0,
00, 11.1803, 0, -2.2361,
00, 0, 7.4536, 0,
00, 0, 0, 10.9545,
00, 0, 0, 0,
00, 0, 0, 0,
};
Matrix expectedR = Matrix_(6,4, dataR);
Matrix Q,R;
boost::tie(Q,R) = qr(A);
CHECK(assert_equal(expectedQ, Q, 1e-4));
CHECK(assert_equal(expectedR, R, 1e-4));
CHECK(assert_equal(A, Q*R, 1e-14));
}
/* ************************************************************************* */

View File

@ -135,46 +135,46 @@ TEST(NoiseModel, ConstrainedAll )
DOUBLES_EQUAL(0.0,i->Mahalanobis(feasible),1e-9);
}
// Currently does not pass
///* ************************************************************************* */
//TEST( NoiseModel, QR )
//{
// // create a matrix to eliminate
// Matrix Ab1 = Matrix_(4, 6+1,
// -1., 0., 1., 0., 0., 0., -0.2,
// 0., -1., 0., 1., 0., 0., 0.3,
// 1., 0., 0., 0., -1., 0., 0.2,
// 0., 1., 0., 0., 0., -1., -0.1);
// Matrix Ab2 = Ab1; // otherwise overwritten !
// Vector sigmas = Vector_(4, 0.2, 0.2, 0.1, 0.1);
//
// // Expected result
// Vector expectedSigmas = Vector_(4, 0.0894427, 0.0894427, 0.223607, 0.223607);
// SharedDiagonal expectedModel = noiseModel::Diagonal::Sigmas(expectedSigmas);
//
// // Call Gaussian version
// SharedDiagonal diagonal = noiseModel::Diagonal::Sigmas(sigmas);
// SharedDiagonal actual1 = diagonal->QR(Ab1);
// SharedDiagonal expected = noiseModel::Unit::Create(4);
// CHECK(assert_equal(*expected,*actual1));
// Matrix expectedRd1 = Matrix_(4, 6+1,
// 11.1803, 0.0, -2.23607, 0.0, -8.94427, 0.0, 2.23607,
// 0.0, 11.1803, 0.0, -2.23607, 0.0, -8.94427,-1.56525,
// -0.618034, 0.0, 4.47214, 0.0, -4.47214, 0.0, 0.0,
// 0.0, -0.618034, 0.0, 4.47214, 0.0, -4.47214, 0.894427);
// CHECK(assert_equal(expectedRd1,Ab1,1e-4)); // Ab was modified in place !!!
//
// // Call Constrained version
// SharedDiagonal constrained = noiseModel::Constrained::MixedSigmas(sigmas);
// SharedDiagonal actual2 = constrained->QR(Ab2);
// CHECK(assert_equal(*expectedModel,*actual2));
// Matrix expectedRd2 = Matrix_(4, 6+1,
// 1., 0., -0.2, 0., -0.8, 0., 0.2,
// 0., 1., 0.,-0.2, 0., -0.8,-0.14,
// 0., 0., 1., 0., -1., 0., 0.0,
// 0., 0., 0., 1., 0., -1., 0.2);
// CHECK(assert_equal(expectedRd2,Ab2,1e-6)); // Ab was modified in place !!!
//}
/* ************************************************************************* */
TEST( NoiseModel, QR )
{
// create a matrix to eliminate
Matrix Ab1 = Matrix_(4, 6+1,
-1., 0., 1., 0., 0., 0., -0.2,
0., -1., 0., 1., 0., 0., 0.3,
1., 0., 0., 0., -1., 0., 0.2,
0., 1., 0., 0., 0., -1., -0.1);
Matrix Ab2 = Ab1; // otherwise overwritten !
Vector sigmas = Vector_(4, 0.2, 0.2, 0.1, 0.1);
// Expected result
Vector expectedSigmas = Vector_(4, 0.0894427, 0.0894427, 0.223607, 0.223607);
SharedDiagonal expectedModel = noiseModel::Diagonal::Sigmas(expectedSigmas);
// Call Gaussian version
SharedDiagonal diagonal = noiseModel::Diagonal::Sigmas(sigmas);
SharedDiagonal actual1 = diagonal->QR(Ab1);
SharedDiagonal expected = noiseModel::Unit::Create(4);
CHECK(assert_equal(*expected,*actual1));
Matrix expectedRd1 = Matrix_(4, 6+1,
11.1803, 0.0, -2.23607, 0.0, -8.94427, 0.0, 2.23607,
0.0, 11.1803, 0.0, -2.23607, 0.0, -8.94427,-1.56525,
0.0, 0.0, 4.47214, 0.0, -4.47214, 0.0, 0.0,
0.0, 0.0, 0.0, 4.47214, 0.0, -4.47214, 0.894427);
CHECK(assert_equal(expectedRd1,Ab1,1e-4)); // Ab was modified in place !!!
// Call Constrained version
SharedDiagonal constrained = noiseModel::Constrained::MixedSigmas(sigmas);
SharedDiagonal actual2 = constrained->QR(Ab2);
SharedDiagonal expectedModel2 = noiseModel::Diagonal::Sigmas(expectedSigmas);
CHECK(assert_equal(*expectedModel2,*actual2));
Matrix expectedRd2 = Matrix_(4, 6+1,
1., 0., -0.2, 0., -0.8, 0., 0.2,
0., 1., 0.,-0.2, 0., -0.8,-0.14,
0., 0., 1., 0., -1., 0., 0.0,
0., 0., 0., 1., 0., -1., 0.2);
CHECK(assert_equal(expectedRd2,Ab2,1e-6)); // Ab was modified in place !!!
}
/* ************************************************************************* */
TEST(NoiseModel, QRNan )

View File

@ -197,7 +197,7 @@ TEST( NonlinearConstraint2, binary_scalar_linearize ) {
x1, Matrix_(1,1, -1.0),
Vector_(1, 6.0), constraintModel);
CHECK(assert_equal(*actualFactor, expectedFactor));
CHECK(assert_equal(*actualConstraint, expectedConstraint)); //FAILS - wrong b value
CHECK(assert_equal(*actualConstraint, expectedConstraint));
}
/* ************************************************************************* */

View File

@ -85,16 +85,15 @@ TEST( NonlinearFactor, NonlinearFactor )
DOUBLES_EQUAL(expected,actual,0.00000001);
}
/* ************************************************************************* *
/* ************************************************************************* */
TEST( NonlinearFactor, linearize_f1 )
{
// Grab a non-linear factor
Graph nfg = createNonlinearFactorGraph();
boost::shared_ptr<NonlinearFactor1> nlf =
boost::static_pointer_cast<NonlinearFactor1>(nfg[0]);
Graph::sharedFactor nlf = nfg[0];
// We linearize at noisy config from SmallExample
VectorConfig c = createNoisyConfig();
Config c = createNoisyConfig();
GaussianFactor::shared_ptr actual = nlf->linearize(c);
GaussianFactorGraph lfg = createGaussianFactorGraph();
@ -104,61 +103,58 @@ TEST( NonlinearFactor, linearize_f1 )
// The error |A*dx-b| approximates (h(x0+dx)-z) = -error_vector
// Hence i.e., b = approximates z-h(x0) = error_vector(x0)
CHECK(assert_equal(nlf->error_vector(c),actual->get_b()));
//CHECK(assert_equal(nlf->error_vector(c),actual->get_b()));
}
/* ************************************************************************* *
/* ************************************************************************* */
TEST( NonlinearFactor, linearize_f2 )
{
// Grab a non-linear factor
Graph nfg = createNonlinearFactorGraph();
boost::shared_ptr<NonlinearFactor1> nlf =
boost::static_pointer_cast<NonlinearFactor1>(nfg[1]);
Graph::sharedFactor nlf = nfg[1];
// We linearize at noisy config from SmallExample
VectorConfig c = createNoisyConfig();
Config c = createNoisyConfig();
GaussianFactor::shared_ptr actual = nlf->linearize(c);
GaussianFactorGraph lfg = createGaussianFactorGraph();
GaussianFactor::shared_ptr expected = lfg[1];
CHECK(expected->equals(*actual));
CHECK(assert_equal(*expected,*actual));
}
/* ************************************************************************* *
/* ************************************************************************* */
TEST( NonlinearFactor, linearize_f3 )
{
// Grab a non-linear factor
Graph nfg = createNonlinearFactorGraph();
boost::shared_ptr<NonlinearFactor1> nlf =
boost::static_pointer_cast<NonlinearFactor1>(nfg[2]);
Graph::sharedFactor nlf = nfg[2];
// We linearize at noisy config from SmallExample
VectorConfig c = createNoisyConfig();
Config c = createNoisyConfig();
GaussianFactor::shared_ptr actual = nlf->linearize(c);
GaussianFactorGraph lfg = createGaussianFactorGraph();
GaussianFactor::shared_ptr expected = lfg[2];
CHECK(expected->equals(*actual));
CHECK(assert_equal(*expected,*actual));
}
/* ************************************************************************* *
/* ************************************************************************* */
TEST( NonlinearFactor, linearize_f4 )
{
// Grab a non-linear factor
Graph nfg = createNonlinearFactorGraph();
boost::shared_ptr<NonlinearFactor1> nlf =
boost::static_pointer_cast<NonlinearFactor1>(nfg[3]);
Graph::sharedFactor nlf = nfg[3];
// We linearize at noisy config from SmallExample
VectorConfig c = createNoisyConfig();
Config c = createNoisyConfig();
GaussianFactor::shared_ptr actual = nlf->linearize(c);
GaussianFactorGraph lfg = createGaussianFactorGraph();
GaussianFactor::shared_ptr expected = lfg[3];
CHECK(expected->equals(*actual));
CHECK(assert_equal(*expected,*actual));
}
/* ************************************************************************* */

View File

@ -68,15 +68,14 @@ TEST( Graph, probPrime )
DOUBLES_EQUAL(expected,actual,0);
}
/* ************************************************************************* *
// TODO: Commented out until noise model is passed to Gaussian factor graph
/* ************************************************************************* */
TEST( Graph, linearize )
{
Graph fg = createNonlinearFactorGraph();
Config initial = createNoisyConfig();
GaussianFactorGraph linearized = fg.linearize(initial);
GaussianFactorGraph expected = createGaussianFactorGraph();
CHECK(assert_equal(expected,linearized));
CHECK(assert_equal(expected,linearized)); // Needs correct linearizations
}
/* ************************************************************************* */

View File

@ -32,6 +32,9 @@ using namespace boost;
using namespace gtsam;
using namespace example;
// FIXME: this tolerance is too high - something is wrong with the noisemodel
const double tol = 1e-6;
typedef NonlinearOptimizer<Graph,Config> Optimizer;
/* ************************************************************************* */
@ -57,12 +60,14 @@ TEST( NonlinearOptimizer, delta )
dx2(1) = -0.2;
expected.insert("x2", dx2);
Optimizer::shared_solver solver;
// Check one ordering
shared_ptr<Ordering> ord1(new Ordering());
*ord1 += "x2","l1","x1";
Optimizer::shared_solver solver;
solver = Optimizer::shared_solver(new Optimizer::solver(ord1));
Optimizer optimizer1(fg, initial, solver);
VectorConfig actual1 = optimizer1.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual1,expected));
@ -71,6 +76,7 @@ TEST( NonlinearOptimizer, delta )
*ord2 += "x1","x2","l1";
solver = Optimizer::shared_solver(new Optimizer::solver(ord2));
Optimizer optimizer2(fg, initial, solver);
VectorConfig actual2 = optimizer2.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual2,expected));
@ -79,8 +85,18 @@ TEST( NonlinearOptimizer, delta )
*ord3 += "l1","x1","x2";
solver = Optimizer::shared_solver(new Optimizer::solver(ord3));
Optimizer optimizer3(fg, initial, solver);
VectorConfig actual3 = optimizer3.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual3,expected));
// More...
shared_ptr<Ordering> ord4(new Ordering());
*ord4 += "x1","x2", "l1";
solver = Optimizer::shared_solver(new Optimizer::solver(ord4));
Optimizer optimizer4(fg, initial, solver);
VectorConfig actual4 = optimizer4.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual4,expected));
}
/* ************************************************************************* */
@ -152,12 +168,12 @@ TEST( NonlinearOptimizer, optimize )
// Gauss-Newton
Optimizer actual1 = optimizer.gaussNewton(relativeThreshold,
absoluteThreshold);
DOUBLES_EQUAL(0,fg->error(*(actual1.config())),1e-3);
DOUBLES_EQUAL(0,fg->error(*(actual1.config())),tol);
// Levenberg-Marquardt
Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
absoluteThreshold, Optimizer::SILENT);
DOUBLES_EQUAL(0,fg->error(*(actual2.config())),1e-3);
DOUBLES_EQUAL(0,fg->error(*(actual2.config())),tol);
}
/* ************************************************************************* */
@ -170,8 +186,8 @@ TEST( NonlinearOptimizer, Factorization )
config->insert(2, Pose2(1.5,0.,0.));
boost::shared_ptr<Pose2Graph> graph(new Pose2Graph);
graph->addPrior(1, Pose2(0.,0.,0.), sharedSigma(3, 1e-10));
graph->addConstraint(1,2, Pose2(1.,0.,0.), sharedSigma(3, 1));
graph->addPrior(1, Pose2(0.,0.,0.), Isotropic::Sigma(3, 1e-10));
graph->addConstraint(1,2, Pose2(1.,0.,0.), Isotropic::Sigma(3, 1));
boost::shared_ptr<Ordering> ordering(new Ordering);
ordering->push_back(Pose2Config::Key(1));
@ -197,8 +213,8 @@ TEST( NonlinearOptimizer, SubgraphPCG )
config->insert(2, Pose2(1.5,0.,0.));
boost::shared_ptr<Pose2Graph> graph(new Pose2Graph);
graph->addPrior(1, Pose2(0.,0.,0.), sharedSigma(3, 1e-10));
graph->addConstraint(1,2, Pose2(1.,0.,0.), sharedSigma(3, 1));
graph->addPrior(1, Pose2(0.,0.,0.), Isotropic::Sigma(3, 1e-10));
graph->addConstraint(1,2, Pose2(1.,0.,0.), Isotropic::Sigma(3, 1));
double relativeThreshold = 1e-5;
double absoluteThreshold = 1e-5;

View File

@ -372,7 +372,7 @@ TEST ( SQPOptimizer, inequality_avoid_iterative ) {
// verify
Config2D exp2(feasible);
exp2.insert(x2, Point2(5.0, 0.5));
CHECK(assert_equal(exp2, *(final.config()))); // FAILS
CHECK(assert_equal(exp2, *(final.config())));
}
/* ************************************************************************* */

View File

@ -27,6 +27,7 @@ using namespace boost::assign;
* Alex's Machine
* Results for Eliminate:
* Initial (1891): 17.91 sec, 55834.7 calls/sec
* NoiseQR : 12.58 sec
*
* Results for matrix_augmented:
* Initial (1891) : 0.85 sec, 1.17647e+06 calls/sec

View File

@ -75,6 +75,8 @@ TEST(timeGaussianFactorGraph, planar)
// Improved (int->size_t)
// (N = 100) : 15.39
// Using GSL/BLAS for updateAb : 12.87
// Using NoiseQR : 16.33
// Using correct system : 10.00
// Switch to 100*100 grid = 10K poses
// 1879: 15.6498 15.3851 15.5279