Merging hessianfactor branch: Cholesky is now the default and GaussianFactor is virtual (see email to frankcvs)

release/4.3a0
Richard Roberts 2011-01-20 22:22:00 +00:00
parent 4367a245bd
commit 4880257e69
63 changed files with 2839 additions and 1797 deletions

View File

@ -155,12 +155,12 @@ namespace gtsam {
assert(Stair != NULL);
tic("householder_denseqr");
tic(1, "householder_denseqr");
int npiv = min(m,n);
int b = min(min(m,n),32);
double W[b*(n+b)];
DenseQR(m, n, npiv, A.data().begin(), Stair, W);
toc("householder_denseqr");
toc(1, "householder_denseqr");
}
} // namespace gtsam

View File

@ -37,7 +37,7 @@ sources += DSFVector.cpp
check_PROGRAMS += tests/testBTree tests/testDSF tests/testDSFVector
# Timing tests
noinst_PROGRAMS = tests/timeMatrix tests/timeublas tests/timeVirtual
noinst_PROGRAMS = tests/timeMatrix tests/timeublas tests/timeVirtual tests/timeTest
#----------------------------------------------------------------------------------------------------
# Create a libtool library that is not installed

View File

@ -72,6 +72,8 @@ public:
const_iterator end() const { return base_.end(); }
};
template<class MATRIX> class SymmetricBlockView;
/**
* This class stores a *reference* to a matrix and allows it to be accessed as
* a collection of vertical blocks. It also provides for accessing individual
@ -81,7 +83,13 @@ public:
* is consistent with the given block dimensions.
*
* This class also has three parameters that can be changed after construction
* that change the apparent view of the matrix. firstBlock() determines the
* block that has index 0 for all operations (except for re-setting
* firstBlock()). rowStart() determines the apparent first row of the matrix
* for all operations (except for setting rowStart() and rowEnd()). rowEnd()
* determines the apparent *exclusive* last row for all operations. To include
* all rows, rowEnd() should be set to the number of rows in the matrix (i.e.
* one after the last true row index).
*/
template<class MATRIX>
class VerticalBlockView {
@ -93,29 +101,58 @@ public:
typedef BlockColumn<const MATRIX> constColumn;
protected:
FullMatrix& matrix_; // the reference to the original matrix
FullMatrix& matrix_; // the reference to the full matrix
std::vector<size_t> variableColOffsets_; // the starting columns of each block (0-based)
// Changes apparent matrix view, see main class comment.
size_t rowStart_; // 0 initially
size_t rowEnd_; // the number of rows initially (exclusive end row)
size_t blockStart_; // 0 initially
public:
/** Construct from an empty matrix (asserts that the matrix is empty) */
VerticalBlockView(FullMatrix& matrix);
VerticalBlockView(FullMatrix& matrix) :
matrix_(matrix), rowStart_(0), rowEnd_(matrix_.size1()), blockStart_(0) {
fillOffsets((size_t*)0, (size_t*)0);
assertInvariants();
}
/**
* Construct from a non-empty matrix and copy the block structure from
* another block view. */
template<class RHS>
VerticalBlockView(FullMatrix& matrix, const RHS& rhs) :
matrix_(matrix) {
if(matrix_.size1() != rhs.size1() || matrix_.size2() != rhs.size2())
throw std::invalid_argument(
"In VerticalBlockView<>(FullMatrix& matrix, const RHS& rhs), matrix and rhs must\n"
"already be of the same size. If not, construct the VerticalBlockView from an\n"
"empty matrix and then use copyStructureFrom(const RHS& rhs) to resize the matrix\n"
"and set up the block structure.");
copyStructureFrom(rhs);
assertInvariants();
}
/** Construct from iterators over the sizes of each vertical block */
template<typename ITERATOR>
VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim);
VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim) :
matrix_(matrix), rowStart_(0), rowEnd_(matrix_.size1()), blockStart_(0) {
fillOffsets(firstBlockDim, lastBlockDim);
assertInvariants();
}
/** Construct from iterators over the sizes of each vertical block, resize the
* matrix so that its height is matrixNewHeight and its width fits the given
* block dimensions.
*/
template<typename ITERATOR>
VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim, size_t matrixNewHeight);
VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim, size_t matrixNewHeight) :
matrix_(matrix), rowStart_(0), rowEnd_(matrixNewHeight), blockStart_(0) {
fillOffsets(firstBlockDim, lastBlockDim);
matrix_.resize(matrixNewHeight, variableColOffsets_.back(), false);
assertInvariants();
}
/** Row size
*/
size_t size1() const { assertInvariants(); return rowEnd_ - rowStart_; }
@ -160,11 +197,19 @@ public:
boost::numeric::ublas::range(variableColOffsets_[actualStartBlock], variableColOffsets_[actualEndBlock]));
}
Block full() {
return range(0,nBlocks());
}
constBlock full() const {
return range(0,nBlocks());
}
Column column(size_t block, size_t columnOffset) {
assertInvariants();
size_t actualBlock = block + blockStart_;
checkBlock(actualBlock);
assert(variableColOffsets_[actualBlock] + columnOffset < matrix_.size2());
assert(variableColOffsets_[actualBlock] + columnOffset < variableColOffsets_[actualBlock+1]);
Block blockMat(operator()(block));
return Column(blockMat, columnOffset);
}
@ -193,24 +238,65 @@ public:
size_t firstBlock() const { return blockStart_; }
/** Copy the block structure and resize the underlying matrix, but do not
* copy the matrix data. If blockStart(), rowStart(), and/or rowEnd() have
* been modified, this copies the structure of the corresponding matrix view.
* In the destination VerticalBlockView, blockStart() and rowStart() will
* thus be 0, rowEnd() will be size1() of the source VerticalBlockView, and
* the underlying matrix will be the size of the view of the source matrix.
*/
template<class RHSMATRIX>
void copyStructureFrom(const VerticalBlockView<RHSMATRIX>& rhs);
template<class RHS>
void copyStructureFrom(const RHS& rhs) {
if(matrix_.size1() != rhs.size1() || matrix_.size2() != rhs.size2())
matrix_.resize(rhs.size1(), rhs.size2(), false);
if(rhs.blockStart_ == 0)
variableColOffsets_ = rhs.variableColOffsets_;
else {
variableColOffsets_.resize(rhs.nBlocks() + 1);
variableColOffsets_[0] = 0;
size_t j=0;
assert(rhs.variableColOffsets_.begin()+rhs.blockStart_ < rhs.variableColOffsets_.end()-1);
for(std::vector<size_t>::const_iterator off=rhs.variableColOffsets_.begin()+rhs.blockStart_; off!=rhs.variableColOffsets_.end()-1; ++off) {
variableColOffsets_[j+1] = variableColOffsets_[j] + (*(off+1) - *off);
++ j;
}
}
rowStart_ = 0;
rowEnd_ = matrix_.size1();
blockStart_ = 0;
assertInvariants();
}
/** Copy the block structure and matrix data, resizing the underlying matrix
* in the process. This can deal with assigning between different types of
* underlying matrices, as long as the matrices themselves are assignable.
* To avoid creating a temporary matrix this assumes no aliasing, i.e. that
* no part of the underlying matrices refers to the same memory!
*
* If blockStart(), rowStart(), and/or rowEnd() have been modified, this
* copies the structure of the corresponding matrix view. In the destination
* VerticalBlockView, blockStart() and rowStart() will thus be 0, rowEnd()
* will be size1() of the source VerticalBlockView, and the underlying matrix
* will be the size of the view of the source matrix.
*/
template<class RHSMATRIX>
VerticalBlockView<MATRIX>& assignNoalias(const VerticalBlockView<RHSMATRIX>& rhs);
template<class RHS>
VerticalBlockView<MATRIX>& assignNoalias(const RHS& rhs) {
copyStructureFrom(rhs);
boost::numeric::ublas::noalias(matrix_) = rhs.full();
return *this;
}
/** Swap the contents of the underlying matrix and the block information with
* another VerticalBlockView.
*/
void swap(VerticalBlockView<MATRIX>& other);
void swap(VerticalBlockView<MATRIX>& other) {
matrix_.swap(other.matrix_);
variableColOffsets_.swap(other.variableColOffsets_);
std::swap(rowStart_, other.rowStart_);
std::swap(rowEnd_, other.rowEnd_);
std::swap(blockStart_, other.blockStart_);
assertInvariants();
other.assertInvariants();
}
protected:
void assertInvariants() const {
@ -238,80 +324,254 @@ protected:
}
}
template<class RHSMATRIX>
friend class VerticalBlockView<MATRIX>;
template<class OTHER> friend class SymmetricBlockView;
template<class RELATED> friend class VerticalBlockView;
};
/* ************************************************************************* */
/**
* This class stores a *reference* to a matrix and allows it to be accessed as
* a collection of blocks. It also provides for accessing individual
* columns from those blocks. When constructed or resized, the caller must
* provide the dimensions of the blocks, as well as an underlying matrix
* storage object. This class will resize the underlying matrix such that it
* is consistent with the given block dimensions.
*
* This class uses a symmetric block structure. The underlying matrix does not
* necessarily need to be symmetric.
*
* This class also has a parameter that can be changed after construction to
* change the apparent matrix view. blockStart() determines the block that
* appears to have index 0 for all operations (except re-setting blockStart()).
*/
template<class MATRIX>
class SymmetricBlockView {
public:
typedef MATRIX FullMatrix;
typedef typename boost::numeric::ublas::matrix_range<MATRIX> Block;
typedef typename boost::numeric::ublas::matrix_range<const MATRIX> constBlock;
typedef BlockColumn<MATRIX> Column;
typedef BlockColumn<const MATRIX> constColumn;
protected:
FullMatrix& matrix_; // the reference to the full matrix
std::vector<size_t> variableColOffsets_; // the starting columns of each block (0-based)
// Changes apparent matrix view, see main class comment.
size_t blockStart_; // 0 initially
public:
/** Construct from an empty matrix (asserts that the matrix is empty) */
SymmetricBlockView(FullMatrix& matrix) :
matrix_(matrix), blockStart_(0) {
fillOffsets((size_t*)0, (size_t*)0);
assertInvariants();
}
/** Construct from iterators over the sizes of each block */
template<typename ITERATOR>
SymmetricBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim) :
matrix_(matrix), blockStart_(0) {
fillOffsets(firstBlockDim, lastBlockDim);
assertInvariants();
}
/**
* Modify the size and structure of the underlying matrix and this block
* view. If 'preserve' is true, the underlying matrix data will be copied if
* the matrix size changes, otherwise the new data will be uninitialized.
*/
template<typename ITERATOR>
void resize(ITERATOR firstBlockDim, ITERATOR lastBlockDim, bool preserve) {
blockStart_ = 0;
fillOffsets(firstBlockDim, lastBlockDim);
matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back(), preserve);
}
/** Row size
*/
size_t size1() const { assertInvariants(); return variableColOffsets_.back() - variableColOffsets_[blockStart_]; }
/** Column size
*/
size_t size2() const { return size1(); }
/** Block count
*/
size_t nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; }
Block operator()(size_t i_block, size_t j_block) {
return range(i_block, i_block+1, j_block, j_block+1);
}
constBlock operator()(size_t i_block, size_t j_block) const {
return range(i_block, i_block+1, j_block, j_block+1);
}
Block range(size_t i_startBlock, size_t i_endBlock, size_t j_startBlock, size_t j_endBlock) {
assertInvariants();
size_t i_actualStartBlock = i_startBlock + blockStart_;
size_t i_actualEndBlock = i_endBlock + blockStart_;
size_t j_actualStartBlock = j_startBlock + blockStart_;
size_t j_actualEndBlock = j_endBlock + blockStart_;
checkBlock(i_actualStartBlock);
checkBlock(j_actualStartBlock);
assert(i_actualEndBlock < variableColOffsets_.size());
assert(j_actualEndBlock < variableColOffsets_.size());
return Block(matrix_,
boost::numeric::ublas::range(variableColOffsets_[i_actualStartBlock], variableColOffsets_[i_actualEndBlock]),
boost::numeric::ublas::range(variableColOffsets_[j_actualStartBlock], variableColOffsets_[j_actualEndBlock]));
}
constBlock range(size_t i_startBlock, size_t i_endBlock, size_t j_startBlock, size_t j_endBlock) const {
assertInvariants();
size_t i_actualStartBlock = i_startBlock + blockStart_;
size_t i_actualEndBlock = i_endBlock + blockStart_;
size_t j_actualStartBlock = j_startBlock + blockStart_;
size_t j_actualEndBlock = j_endBlock + blockStart_;
checkBlock(i_actualStartBlock);
checkBlock(j_actualStartBlock);
assert(i_actualEndBlock < variableColOffsets_.size());
assert(j_actualEndBlock < variableColOffsets_.size());
return constBlock(matrix_,
boost::numeric::ublas::range(variableColOffsets_[i_actualStartBlock], variableColOffsets_[i_actualEndBlock]),
boost::numeric::ublas::range(variableColOffsets_[j_actualStartBlock], variableColOffsets_[j_actualEndBlock]));
}
Block full() {
return range(0,nBlocks(), 0,nBlocks());
}
constBlock full() const {
return range(0,nBlocks(), 0,nBlocks());
}
Column column(size_t i_block, size_t j_block, size_t columnOffset) {
assertInvariants();
size_t j_actualBlock = j_block + blockStart_;
assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]);
Block blockMat(operator()(i_block, j_block));
return Column(blockMat, columnOffset);
}
constColumn column(size_t i_block, size_t j_block, size_t columnOffset) const {
assertInvariants();
size_t j_actualBlock = j_block + blockStart_;
assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]);
constBlock blockMat(operator()(i_block, j_block));
return constColumn(blockMat, columnOffset);
}
Column rangeColumn(size_t i_startBlock, size_t i_endBlock, size_t j_block, size_t columnOffset) {
assertInvariants();
size_t j_actualBlock = j_block + blockStart_;
assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]);
Block blockMat(this->range(i_startBlock, i_endBlock, j_block, j_block+1));
return Column(blockMat, columnOffset);
}
constColumn rangeColumn(size_t i_startBlock, size_t i_endBlock, size_t j_block, size_t columnOffset) const {
assertInvariants();
size_t j_actualBlock = j_block + blockStart_;
assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]);
constBlock blockMat(this->range(i_startBlock, i_endBlock, j_block, j_block+1));
return constColumn(blockMat, columnOffset);
}
size_t offset(size_t block) const {
assertInvariants();
size_t actualBlock = block + blockStart_;
checkBlock(actualBlock);
return variableColOffsets_[actualBlock] - variableColOffsets_[blockStart_];
}
size_t& blockStart() { return blockStart_; }
size_t blockStart() const { return blockStart_; }
/** Copy the block structure and resize the underlying matrix, but do not
* copy the matrix data. If blockStart() has been modified, this copies the
* structure of the corresponding matrix view. In the destination
* SymmetricBlockView, blockStart() will thus be 0 and the underlying matrix
* will be the size of the view of the source matrix.
*/
template<class RHS>
void copyStructureFrom(const RHS& rhs) {
matrix_.resize(rhs.size2(), rhs.size2(), false);
if(rhs.blockStart_ == 0)
variableColOffsets_ = rhs.variableColOffsets_;
else {
variableColOffsets_.resize(rhs.nBlocks() + 1);
variableColOffsets_[0] = 0;
size_t j=0;
assert(rhs.variableColOffsets_.begin()+rhs.blockStart_ < rhs.variableColOffsets_.end()-1);
for(std::vector<size_t>::const_iterator off=rhs.variableColOffsets_.begin()+rhs.blockStart_; off!=rhs.variableColOffsets_.end()-1; ++off) {
variableColOffsets_[j+1] = variableColOffsets_[j] + (*(off+1) - *off);
++ j;
}
}
blockStart_ = 0;
assertInvariants();
}
/** Copy the block structure and matrix data, resizing the underlying matrix
* in the process. This can deal with assigning between different types of
* underlying matrices, as long as the matrices themselves are assignable.
* To avoid creating a temporary matrix this assumes no aliasing, i.e. that
* no part of the underlying matrices refers to the same memory!
*
* If blockStart() has been modified, this copies the structure of the
* corresponding matrix view. In the destination SymmetricBlockView,
* blockStart() will thus be 0 and the underlying matrix will be the size
* of the view of the source matrix.
*/
template<class RHSMATRIX>
SymmetricBlockView<MATRIX>& assignNoalias(const SymmetricBlockView<RHSMATRIX>& rhs) {
copyStructureFrom(rhs);
boost::numeric::ublas::noalias(matrix_) = rhs.range(0, rhs.nBlocks(), 0, rhs.nBlocks());
return *this;
}
/** Swap the contents of the underlying matrix and the block information with
* another SymmetricBlockView.
*/
void swap(SymmetricBlockView<MATRIX>& other) {
matrix_.swap(other.matrix_);
variableColOffsets_.swap(other.variableColOffsets_);
std::swap(blockStart_, other.blockStart_);
assertInvariants();
other.assertInvariants();
}
protected:
void assertInvariants() const {
assert(matrix_.size1() == matrix_.size2());
assert(matrix_.size2() == variableColOffsets_.back());
assert(blockStart_ < variableColOffsets_.size());
}
void checkBlock(size_t block) const {
assert(matrix_.size1() == matrix_.size2());
assert(matrix_.size2() == variableColOffsets_.back());
assert(block < variableColOffsets_.size()-1);
assert(variableColOffsets_[block] < matrix_.size2() && variableColOffsets_[block+1] <= matrix_.size2());
}
template<typename ITERATOR>
void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim) {
variableColOffsets_.resize((lastBlockDim-firstBlockDim)+1);
variableColOffsets_[0] = 0;
size_t j=0;
for(ITERATOR dim=firstBlockDim; dim!=lastBlockDim; ++dim) {
variableColOffsets_[j+1] = variableColOffsets_[j] + *dim;
++ j;
}
}
template<class RELATED> friend class SymmetricBlockView;
template<class OTHER> friend class VerticalBlockView;
};
}
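A short usage sketch of the two block views above (illustrative only, not part of this commit), assuming a uBLAS matrix as the underlying storage:

#include <boost/numeric/ublas/matrix.hpp>
#include <gtsam/base/blockMatrices.h>

void blockViewExample() {
  using namespace gtsam;
  typedef boost::numeric::ublas::matrix<double,
      boost::numeric::ublas::column_major> DenseMatrix;

  // A 4-row matrix viewed as two vertical blocks of widths 2 and 3; the
  // four-argument constructor resizes m to 4x5.
  DenseMatrix m;
  size_t dims[] = {2, 3};
  VerticalBlockView<DenseMatrix> Ab(m, dims, dims + 2, 4);
  Ab(0);       // the 4x2 block
  Ab(1);       // the 4x3 block
  Ab.full();   // the whole 4x5 view

  // A symmetric block structure with block sizes 2 and 3; resize() makes
  // the underlying matrix 5x5.
  DenseMatrix h;
  SymmetricBlockView<DenseMatrix> H(h);
  H.resize(dims, dims + 2, false);
  H(0, 1);             // the 2x3 off-diagonal block
  H.blockStart() = 1;  // block 0 now refers to the former block 1
}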

View File

@ -18,6 +18,7 @@
#include <gtsam/base/cholesky.h>
#include <gtsam/base/lapack.h>
#include <gtsam/base/timing.h>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/blas.hpp>
@ -225,7 +226,50 @@ size_t choleskyCareful(MatrixColMajor& ATA) {
}
return maxrank;
}
/* ************************************************************************* */
void choleskyPartial(MatrixColMajor& ABC, size_t nFrontal) {
static const bool debug = false;
assert(ABC.size1() == ABC.size2());
assert(nFrontal <= ABC.size1());
const size_t n = ABC.size1();
// Compute Cholesky factorization of A, overwrites A.
tic(1, "dpotrf");
int info = lapack_dpotrf('U', nFrontal, &ABC(0,0), n);
if(info != 0) {
if(info < 0)
throw std::domain_error(boost::str(boost::format(
"Bad input to choleskyFactorUnderdetermined, dpotrf returned %d.\n")%info));
else
throw std::domain_error(boost::str(boost::format(
"The matrix passed into choleskyFactorUnderdetermined is numerically rank-deficient, dpotrf returned rank=%d, expected rank was %d.\n")%(info-1)%nFrontal));
}
toc(1, "dpotrf");
// Views of R, S, and L submatrices.
ublas::matrix_range<MatrixColMajor> R(ublas::project(ABC, ublas::range(0,nFrontal), ublas::range(0,nFrontal)));
ublas::matrix_range<MatrixColMajor> S(ublas::project(ABC, ublas::range(0,nFrontal), ublas::range(nFrontal,n)));
ublas::matrix_range<MatrixColMajor> L(ublas::project(ABC, ublas::range(nFrontal,n), ublas::range(nFrontal,n)));
// Compute S = inv(R') * B
tic(2, "compute S");
if(S.size2() > 0)
cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, S.size1(), S.size2(), 1.0, &R(0,0), n, &S(0,0), n);
if(debug) gtsam::print(S, "S: ");
toc(2, "compute S");
// Compute L = C - S' * S
tic(3, "compute L");
if(debug) gtsam::print(L, "C: ");
if(L.size2() > 0)
cblas_dsyrk(CblasColMajor, CblasUpper, CblasTrans, L.size1(), S.size1(), -1.0, &S(0,0), n, 1.0, &L(0,0), n);
if(debug) gtsam::print(L, "L: ");
toc(3, "compute L");
}
}

View File

@ -60,5 +60,15 @@ size_t choleskyFactorUnderdetermined(MatrixColMajor& Ab, size_t nFrontal);
*/
size_t choleskyCareful(MatrixColMajor& ATA);
/**
* Partial Cholesky computes a factor [R S; 0 L] such that
*
*   [R' 0]   [R S]   [A  B]
*   [S' I] * [0 L] = [B' C]
*
* The input to this function is the matrix ABC = [A B; B' C], and the
* parameter nFrontal determines the split between A, B, and C, with A being
* of size nFrontal x nFrontal.
*/
void choleskyPartial(MatrixColMajor& ABC, size_t nFrontal);
}
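Expanding the block product above clarifies the three steps of the implementation in cholesky.cpp (an explanatory note, not part of the commit):

  [R' 0]   [R S]   [R'R   R'S    ]
  [S' I] * [0 L] = [S'R   S'S + L]

so A = R'R is the Cholesky factorization of the frontal block (computed with lapack_dpotrf), S = inv(R') * B is a triangular solve (cblas_dtrsm), and L = C - S'*S is a symmetric rank-k update (cblas_dsyrk).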

View File

@ -20,6 +20,8 @@
#include <CppUnitLite/TestHarness.h>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/symmetric.hpp>
using namespace gtsam;
namespace ublas = boost::numeric::ublas;
@ -66,6 +68,37 @@ TEST(cholesky, choleskyFactorUnderdetermined) {
CHECK(assert_equal(expected, actual, 1e-3));
}
/* ************************************************************************* */
TEST(cholesky, choleskyPartial) {
// choleskyPartial should only use the upper triangle, so this represents a
// symmetric matrix.
Matrix ABC = Matrix_(7,7,
4.0375, 3.4584, 3.5735, 2.4815, 2.1471, 2.7400, 2.2063,
0., 4.7267, 3.8423, 2.3624, 2.8091, 2.9579, 2.5914,
0., 0., 5.1600, 2.0797, 3.4690, 3.2419, 2.9992,
0., 0., 0., 1.8786, 1.0535, 1.4250, 1.3347,
0., 0., 0., 0., 3.0788, 2.6283, 2.3791,
0., 0., 0., 0., 0., 2.9227, 2.4056,
0., 0., 0., 0., 0., 0., 2.5776);
// Do partial Cholesky on 3 frontal scalar variables
MatrixColMajor RSL(ABC);
choleskyPartial(RSL, 3);
// See the function comment for choleskyPartial; this decomposition should hold.
Matrix R1(ublas::trans(RSL));
Matrix R2(RSL);
ublas::project(R1, ublas::range(3,7), ublas::range(3,7)) = eye(4,4);
ublas::project(R2, ublas::range(3,7), ublas::range(3,7)) =
ublas::symmetric_adaptor<const ublas::matrix_range<Matrix>,ublas::upper>(
ublas::project(R2, ublas::range(3,7), ublas::range(3,7)));
Matrix actual(R1 * R2);
EXPECT(assert_equal(ublas::symmetric_adaptor<Matrix,ublas::upper>(ABC), actual, 1e-9));
}
/* ************************************************************************* */
int main() {
TestResult tr;

View File

@ -0,0 +1,62 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file timeTest.cpp
* @brief Times the overhead of the tic/toc timing utilities
* @author Richard Roberts (extracted from Michael Kaess' timing functions)
* @created Oct 5, 2010
*/
#include <gtsam/base/timing.h>
int main(int argc, char *argv[]) {
ticPush_("1", "top 1");
ticPush_("1", "sub 1");
tic_("sub sub a");
toc_("sub sub a");
ticPop_("1", "sub 1");
ticPush_("2", "sub 2");
tic_("sub sub b");
toc_("sub sub b");
ticPop_("2", "sub 2");
ticPop_("1", "top 1");
ticPush_("2", "top 2");
ticPush_("1", "sub 1");
tic_("sub sub a");
toc_("sub sub a");
ticPop_("1", "sub 1");
ticPush_("2", "sub 2");
tic_("sub sub b");
toc_("sub sub b");
ticPop_("2", "sub 2");
ticPop_("2", "top 2");
for(size_t i=0; i<1000000; ++i) {
ticPush_("3", "overhead");
ticPush_("1", "overhead");
ticPop_("1", "overhead");
ticPop_("3", "overhead");
}
for(size_t i=0; i<1000000; ++i) {
tic(1, "overhead a");
tic(1, "overhead b");
toc(1, "overhead b");
toc(1, "overhead a");
}
tictoc_print_();
return 0;
}

View File

@ -7,6 +7,7 @@
#pragma once
#include <gtsam/base/timing.h>
#include <gtsam/base/FastSet.h>
#include <gtsam/inference/EliminationTree.h>
#include <gtsam/inference/VariableSlots.h>
#include <gtsam/inference/FactorGraph-inl.h>
@ -30,7 +31,7 @@ EliminationTree<FACTOR>::eliminate_(Conditionals& conditionals) const {
if(debug) cout << "ETree: eliminating " << this->key_ << endl;
set<Index, std::less<Index>, boost::fast_pool_allocator<Index> > separator;
FastSet<Index> separator;
// Create the list of factors to be eliminated, initially empty, and reserve space
FactorGraph<FACTOR> factors;
@ -92,12 +93,10 @@ EliminationTree<FACTOR>::Create(const FactorGraph<DERIVEDFACTOR>& factorGraph, c
static const bool debug = false;
tic("ET 1: Create");
tic("ET 1.1: ComputeParents");
tic(1, "ET ComputeParents");
// Compute the tree structure
vector<Index> parents(ComputeParents(structure));
toc("ET 1.1: ComputeParents");
toc(1, "ET ComputeParents");
// Number of variables
const size_t n = structure.size();
@ -105,7 +104,7 @@ EliminationTree<FACTOR>::Create(const FactorGraph<DERIVEDFACTOR>& factorGraph, c
static const Index none = numeric_limits<Index>::max();
// Create tree structure
tic("ET 1.2: assemble tree");
tic(2, "assemble tree");
vector<shared_ptr> trees(n);
for (Index k = 1; k <= n; k++) {
Index j = n - k;
@ -113,10 +112,10 @@ EliminationTree<FACTOR>::Create(const FactorGraph<DERIVEDFACTOR>& factorGraph, c
if (parents[j] != none)
trees[parents[j]]->add(trees[j]);
}
toc("ET 1.2: assemble tree");
toc(2, "assemble tree");
// Hang factors in right places
tic("ET 1.3: hang factors");
tic(3, "hang factors");
BOOST_FOREACH(const typename DERIVEDFACTOR::shared_ptr& derivedFactor, factorGraph) {
// Here we static_cast to the factor type of this EliminationTree. This
// allows performing symbolic elimination on, for example, GaussianFactors.
@ -124,9 +123,7 @@ EliminationTree<FACTOR>::Create(const FactorGraph<DERIVEDFACTOR>& factorGraph, c
Index j = factor->front();
trees[j]->add(factor);
}
toc("ET 1.3: hang factors");
toc("ET 1: Create");
toc(3, "hang factors");
// Assert that all other nodes have parents, i.e. that this is not a forest.
#ifndef NDEBUG
@ -147,9 +144,9 @@ typename EliminationTree<FACTOR>::shared_ptr
EliminationTree<FACTOR>::Create(const FactorGraph<DERIVEDFACTOR>& factorGraph) {
// Build variable index
tic("ET 0: variable index");
tic(0, "ET Create, variable index");
const VariableIndex variableIndex(factorGraph);
toc("ET 0: variable index");
toc(0, "ET Create, variable index");
// Build elimination tree
return Create(factorGraph, variableIndex);
@ -185,24 +182,20 @@ template<class FACTOR>
typename EliminationTree<FACTOR>::BayesNet::shared_ptr
EliminationTree<FACTOR>::eliminate() const {
tic("ET 2: eliminate");
// call recursive routine
tic("ET 2.1: recursive eliminate");
tic(1, "ET recursive eliminate");
Conditionals conditionals(this->key_ + 1);
(void)eliminate_(conditionals);
toc("ET 2.1: recursive eliminate");
toc(1, "ET recursive eliminate");
// Add conditionals to BayesNet
tic("ET 2.1: assemble BayesNet");
tic(2, "assemble BayesNet");
typename BayesNet::shared_ptr bayesNet(new BayesNet);
BOOST_FOREACH(const typename BayesNet::sharedConditional& conditional, conditionals) {
if(conditional)
bayesNet->push_back(conditional);
}
toc("ET 2.1: assemble BayesNet");
toc("ET 2: eliminate");
toc(2, "assemble BayesNet");
return bayesNet;
}

View File

@ -101,7 +101,7 @@ public:
keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; assertInvariants(); }
/** Construct n-way factor */
FactorBase(std::set<Key> keys) {
FactorBase(const std::set<Key>& keys) {
BOOST_FOREACH(const Key& key, keys)
keys_.push_back(key);
assertInvariants(); }
@ -124,7 +124,7 @@ public:
typename BayesNet<CONDITIONAL>::shared_ptr eliminate(size_t nrFrontals = 1);
/**
* Permutes the GaussianFactor, but for efficiency requires the permutation
* Permutes the factor, but for efficiency requires the permutation
* to already be inverted.
*/
void permuteWithInverse(const Permutation& inversePermutation);

View File

@ -85,7 +85,7 @@ namespace gtsam {
void print(const std::string& s = "FactorGraph") const;
/** Check equality */
bool equals(const FactorGraph& fg, double tol = 1e-9) const;
bool equals(const FactorGraph<FACTOR>& fg, double tol = 1e-9) const;
/** const cast to the underlying vector of factors */
operator const std::vector<sharedFactor>&() const { return factors_; }
@ -110,6 +110,39 @@ namespace gtsam {
/** return the number of valid factors */
size_t nrFactors() const;
/** dynamic_cast the factor pointers down or up the class hierarchy */
template<class RELATED>
typename RELATED::shared_ptr dynamicCastFactors() const {
typename RELATED::shared_ptr ret(new RELATED);
ret->reserve(this->size());
BOOST_FOREACH(const sharedFactor& factor, *this) {
typename RELATED::Factor::shared_ptr castedFactor(boost::dynamic_pointer_cast<typename RELATED::Factor>(factor));
if(castedFactor)
ret->push_back(castedFactor);
else
throw std::invalid_argument("In FactorGraph<FACTOR>::dynamicCastFactors(), dynamic_cast failed, meaning an invalid cast was requested.");
}
return ret;
}
/**
* dynamic_cast factor pointers if possible, otherwise convert with a
* constructor of the target type.
*/
template<class TARGET>
typename TARGET::shared_ptr convertCastFactors() const {
typename TARGET::shared_ptr ret(new TARGET);
ret->reserve(this->size());
BOOST_FOREACH(const sharedFactor& factor, *this) {
typename TARGET::Factor::shared_ptr castedFactor(boost::dynamic_pointer_cast<typename TARGET::Factor>(factor));
if(castedFactor)
ret->push_back(castedFactor);
else
ret->push_back(typename TARGET::Factor::shared_ptr(new typename TARGET::Factor(*factor)));
}
return ret;
}
/** ----------------- Modifying Factor Graphs ---------------------------- */
/** STL begin and end, so we can use BOOST_FOREACH */
@ -179,7 +212,7 @@ namespace gtsam {
FactorGraph<FACTOR>::FactorGraph(const BayesNet<CONDITIONAL>& bayesNet) {
factors_.reserve(bayesNet.size());
BOOST_FOREACH(const typename CONDITIONAL::shared_ptr& cond, bayesNet) {
this->push_back(sharedFactor(new FACTOR(*cond)));
this->push_back(cond->toFactor());
}
}
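A brief usage sketch of the two cast helpers added above (illustrative only; it assumes the JacobianFactor type introduced elsewhere in this commit):

#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/JacobianFactor.h>

void castExample(const gtsam::GaussianFactorGraph& graph) {
  using namespace gtsam;
  // Downcast every factor; throws std::invalid_argument if any factor is
  // not actually a JacobianFactor:
  FactorGraph<JacobianFactor>::shared_ptr jacobians =
      graph.dynamicCastFactors<FactorGraph<JacobianFactor> >();
  // Downcast where possible, otherwise construct a JacobianFactor from the
  // source factor (requires such a constructor to exist):
  FactorGraph<JacobianFactor>::shared_ptr converted =
      graph.convertCastFactors<FactorGraph<JacobianFactor> >();
}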

View File

@ -87,7 +87,7 @@ typename FactorGraph<FACTOR>::shared_ptr GenericSequentialSolver<FACTOR>::jointF
joint->reserve(js.size());
typename BayesNet<typename FACTOR::Conditional>::const_reverse_iterator conditional = bayesNet->rbegin();
for(size_t i = 0; i < js.size(); ++i) {
joint->push_back(typename FACTOR::shared_ptr(new FACTOR(**(conditional++)))); }
joint->push_back((*(conditional++))->toFactor()); }
// Undo the permutation on the eliminated joint marginal factors
BOOST_FOREACH(const typename FACTOR::shared_ptr& factor, *joint) {

View File

@ -23,8 +23,6 @@
namespace gtsam {
class IndexFactor;
class IndexConditional : public ConditionalBase<Index> {
public:
@ -62,6 +60,9 @@ public:
static shared_ptr FromRange(ITERATOR firstKey, ITERATOR lastKey, size_t nrFrontals) {
return Base::FromRange<This>(firstKey, lastKey, nrFrontals); }
/** Convert to a factor */
IndexFactor::shared_ptr toFactor() const { return IndexFactor::shared_ptr(new IndexFactor(*this)); }
};
}

View File

@ -18,6 +18,7 @@
#include <gtsam/inference/FactorBase-inl.h>
#include <gtsam/inference/IndexFactor.h>
#include <gtsam/inference/IndexConditional.h>
#include <gtsam/inference/VariableSlots.h>
using namespace std;

View File

@ -18,7 +18,6 @@
#pragma once
#include <gtsam/inference/IndexConditional.h>
#include <gtsam/inference/FactorBase.h>
namespace gtsam {
@ -60,7 +59,7 @@ public:
IndexFactor(Index j1, Index j2, Index j3, Index j4) : Base(j1, j2, j3, j4) {}
/** Construct n-way factor */
IndexFactor(std::set<Index> js) : Base(js) {}
IndexFactor(const std::set<Index>& js) : Base(js) {}
/**
* Combine and eliminate several factors.

View File

@ -39,19 +39,20 @@ namespace gtsam {
/* ************************************************************************* */
template <class FG>
void JunctionTree<FG>::construct(const FG& fg, const VariableIndex& variableIndex) {
tic("JT 1 constructor");
tic("JT 1.1 symbolic elimination");
SymbolicBayesNet::shared_ptr sbn = EliminationTree<IndexFactor>::Create(fg, variableIndex)->eliminate();
toc("JT 1.1 symbolic elimination");
tic("JT 1.2 symbolic BayesTree");
tic(1, "JT symbolic ET");
const typename EliminationTree<IndexFactor>::shared_ptr symETree(EliminationTree<IndexFactor>::Create(fg, variableIndex));
toc(1, "JT symbolic ET");
tic(2, "JT symbolic eliminate");
SymbolicBayesNet::shared_ptr sbn = symETree->eliminate();
toc(2, "JT symbolic eliminate");
tic(3, "symbolic BayesTree");
SymbolicBayesTree sbt(*sbn);
toc("JT 1.2 symbolic BayesTree");
toc(3, "symbolic BayesTree");
// distribute factors
tic("JT 1.3 distributeFactors");
tic(4, "distributeFactors");
this->root_ = distributeFactors(fg, sbt.root());
toc("JT 1.3 distributeFactors");
toc("JT 1 constructor");
toc(4, "distributeFactors");
}
/* ************************************************************************* */
@ -95,7 +96,7 @@ namespace gtsam {
// Now add each factor to the list corresponding to its lowest-ordered
// variable.
vector<list<size_t, boost::fast_pool_allocator<size_t> > > targets(maxVar+1);
vector<FastList<size_t> > targets(maxVar+1);
for(size_t i=0; i<lowestOrdered.size(); ++i)
if(lowestOrdered[i] != numeric_limits<Index>::max())
targets[lowestOrdered[i]].push_back(i);
@ -107,7 +108,7 @@ namespace gtsam {
/* ************************************************************************* */
template<class FG>
typename JunctionTree<FG>::sharedClique JunctionTree<FG>::distributeFactors(const FG& fg,
const std::vector<std::list<size_t,boost::fast_pool_allocator<size_t> > >& targets,
const std::vector<FastList<size_t> >& targets,
const SymbolicBayesTree::sharedClique& bayesClique) {
if(bayesClique) {
@ -165,22 +166,23 @@ namespace gtsam {
// Now that we know which factors and variables, and where variables
// come from and go to, create and eliminate the new joint factor.
tic("JT 2.2 CombineAndEliminate");
tic(2, "CombineAndEliminate");
pair<typename BayesNet<typename FG::Factor::Conditional>::shared_ptr, typename FG::sharedFactor> eliminated(
FG::Factor::CombineAndEliminate(fg, current->frontal.size()));
toc("JT 2.2 CombineAndEliminate");
toc(2, "CombineAndEliminate");
assert(std::equal(eliminated.second->begin(), eliminated.second->end(), current->separator.begin()));
tic("JT 2.4 Update tree");
tic(3, "Update tree");
// create a new clique corresponding the combined factors
typename BayesTree::sharedClique new_clique(new typename BayesTree::Clique(*eliminated.first));
new_clique->children_ = children;
BOOST_FOREACH(typename BayesTree::sharedClique& childRoot, children)
childRoot->parent_ = new_clique;
BOOST_FOREACH(typename BayesTree::sharedClique& childRoot, children) {
childRoot->parent_ = new_clique;
}
toc(3, "Update tree");
toc("JT 2.4 Update tree");
return make_pair(new_clique, eliminated.second);
}
@ -188,11 +190,9 @@ namespace gtsam {
template <class FG>
typename JunctionTree<FG>::BayesTree::sharedClique JunctionTree<FG>::eliminate() const {
if(this->root()) {
tic("JT 2 eliminate");
pair<typename BayesTree::sharedClique, typename FG::sharedFactor> ret = this->eliminateOneClique(this->root());
if (ret.second->size() != 0)
throw runtime_error("JunctionTree::eliminate: elimination failed because of factors left over!");
toc("JT 2 eliminate");
return ret.first;
} else
return typename BayesTree::sharedClique();

View File

@ -23,7 +23,7 @@
#include <vector>
#include <list>
#include <boost/shared_ptr.hpp>
#include <boost/pool/pool_alloc.hpp>
#include <gtsam/base/FastList.h>
#include <gtsam/inference/BayesTree.h>
#include <gtsam/inference/ClusterTree.h>
#include <gtsam/inference/IndexConditional.h>
@ -59,7 +59,7 @@ namespace gtsam {
const SymbolicBayesTree::sharedClique& clique);
// distribute the factors along the cluster tree
sharedClique distributeFactors(const FG& fg, const std::vector<std::list<size_t,boost::fast_pool_allocator<size_t> > >& targets,
sharedClique distributeFactors(const FG& fg, const std::vector<FastList<size_t> >& targets,
const SymbolicBayesTree::sharedClique& clique);
// recursive elimination function

View File

@ -27,6 +27,7 @@ using namespace boost::assign;
#define GTSAM_MAGIC_KEY
#include <gtsam/inference/SymbolicFactorGraph.h>
#include <gtsam/inference/FactorGraph-inl.h>
using namespace std;
using namespace gtsam;
@ -92,6 +93,11 @@ typedef boost::shared_ptr<SymbolicFactorGraph> shared;
// CHECK(singletonGraph_excepted.equals(singletonGraph));
//}
TEST(FactorGraph, dynamic_factor_cast) {
FactorGraph<IndexFactor> fg;
fg.dynamicCastFactors<FactorGraph<IndexFactor> >();
}
/* ************************************************************************* */
int main() {

View File

@ -22,6 +22,8 @@
#include <boost/lambda/bind.hpp>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/base/Matrix-inl.h>
using namespace std;
@ -139,7 +141,10 @@ bool GaussianConditional::equals(const GaussianConditional &c, double tol) const
return true;
}
/* ************************************************************************* */
GaussianFactor::shared_ptr GaussianConditional::toFactor() const {
return GaussianFactor::shared_ptr(new JacobianFactor(*this));
}
/* ************************************************************************* */
Vector GaussianConditional::solve(const VectorValues& x) const {

View File

@ -118,6 +118,12 @@ public:
rsd_type::constBlock get_S(const_iterator variable) const { return rsd_(variable - this->begin()); }
const Vector& get_sigmas() const {return sigmas_;}
/**
* Copy to a Factor (this creates a JacobianFactor and returns it as its
* base class GaussianFactor.
*/
boost::shared_ptr<GaussianFactor> toFactor() const;
/**
* solve a conditional Gaussian
* @param x values structure in which the parents values (y,z,...) are known
@ -137,7 +143,7 @@ protected:
rsd_type::Block get_R_() { return rsd_(0); }
rsd_type::Block get_S_(iterator variable) { return rsd_(variable - this->begin()); }
friend class GaussianFactor;
friend class JacobianFactor;
private:
// /** Serialization function */

File diff suppressed because it is too large

View File

@ -13,42 +13,24 @@
* @file GaussianFactor.h
* @brief A Gaussian linear factor
* @author Christian Potthast
* @author Richard Roberts, Christian Potthast
*/
// \callgraph
#pragma once
#include <boost/shared_ptr.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/foreach.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/bind.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/pool/pool_alloc.hpp>
#include <list>
#include <set>
#include <vector>
#include <map>
#include <deque>
#include <gtsam/base/types.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/base/blockMatrices.h>
#include <gtsam/inference/IndexFactor.h>
#include <gtsam/inference/inference.h>
#include <gtsam/inference/VariableSlots.h>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/VectorValues.h>
#include <gtsam/linear/SharedDiagonal.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/Errors.h>
#include <string>
#include <utility>
namespace gtsam {
/** A map from key to dimension, useful in various contexts */
typedef std::map<Index, size_t> Dimensions;
class VectorValues;
class Permutation;
/**
* Base Class for a linear factor.
@ -59,201 +41,92 @@ namespace gtsam {
protected:
typedef boost::numeric::ublas::matrix<double, boost::numeric::ublas::column_major> AbMatrix;
typedef VerticalBlockView<AbMatrix> BlockAb;
public:
typedef GaussianConditional Conditional;
typedef boost::shared_ptr<GaussianFactor> shared_ptr;
typedef BlockAb::Block ABlock;
typedef BlockAb::constBlock constABlock;
typedef BlockAb::Column BVector;
typedef BlockAb::constColumn constBVector;
enum SolveMethod { SOLVE_QR, SOLVE_CHOLESKY };
protected:
SharedDiagonal model_; // Gaussian noise model with diagonal covariance matrix
std::vector<size_t> firstNonzeroBlocks_;
AbMatrix matrix_; // the full matrix corresponding to the factor
BlockAb Ab_; // the block view of the full matrix
public:
/** Copy constructor */
GaussianFactor(const GaussianFactor& gf);
GaussianFactor(const This& f) : IndexFactor(f) {}
/** default constructor for I/O */
GaussianFactor();
/** Construct from derived type */
GaussianFactor(const GaussianConditional& c) : IndexFactor(c) {}
/** Construct Null factor */
GaussianFactor(const Vector& b_in);
/** Constructor from a collection of keys */
template<class KeyIterator> GaussianFactor(KeyIterator beginKey, KeyIterator endKey) :
Base(beginKey, endKey) {}
/** Default constructor for I/O */
GaussianFactor() {}
/** Construct unary factor */
GaussianFactor(Index i1, const Matrix& A1,
const Vector& b, const SharedDiagonal& model);
GaussianFactor(Index j) : IndexFactor(j) {}
/** Construct binary factor */
GaussianFactor(Index i1, const Matrix& A1,
Index i2, const Matrix& A2,
const Vector& b, const SharedDiagonal& model);
GaussianFactor(Index j1, Index j2) : IndexFactor(j1, j2) {}
/** Construct ternary factor */
GaussianFactor(Index i1, const Matrix& A1, Index i2,
const Matrix& A2, Index i3, const Matrix& A3,
const Vector& b, const SharedDiagonal& model);
GaussianFactor(Index j1, Index j2, Index j3) : IndexFactor(j1, j2, j3) {}
/** Construct an n-ary factor */
GaussianFactor(const std::vector<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model);
/** Construct 4-way factor */
GaussianFactor(Index j1, Index j2, Index j3, Index j4) : IndexFactor(j1, j2, j3, j4) {}
GaussianFactor(const std::list<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model);
/** Construct n-way factor */
GaussianFactor(const std::set<Index>& js) : IndexFactor(js) {}
/** Construct from Conditional Gaussian */
GaussianFactor(const GaussianConditional& cg);
public:
enum SolveMethod { SOLVE_QR, SOLVE_PREFER_CHOLESKY };
typedef GaussianConditional Conditional;
typedef boost::shared_ptr<GaussianFactor> shared_ptr;
// Implementing Testable interface
void print(const std::string& s = "") const;
bool equals(const GaussianFactor& lf, double tol = 1e-9) const;
virtual void print(const std::string& s = "") const = 0;
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const = 0;
Vector unweighted_error(const VectorValues& c) const; /** (A*x-b) */
Vector error_vector(const VectorValues& c) const; /** (A*x-b)/sigma */
double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */
virtual double error(const VectorValues& c) const = 0; /** 0.5*(A*x-b)'*D*(A*x-b) */
/** Check if the factor contains no information, i.e. zero rows. This does
* not necessarily mean that the factor involves no variables (to check for
* involving no variables use keys().empty()).
*/
bool empty() const { return Ab_.size1() == 0;}
/**
* return the number of rows in the corresponding linear system
*/
size_t size1() const { return Ab_.size1(); }
/**
* return the number of columns in the corresponding linear system
*/
size_t size2() const { return Ab_.size2(); }
/** Get a view of the r.h.s. vector b */
constBVector getb() const { return Ab_.column(size(), 0); }
/** Get a view of the A matrix for the variable pointed to be the given key iterator */
constABlock getA(const_iterator variable) const { return Ab_(variable - keys_.begin()); }
BVector getb() { return Ab_.column(size(), 0); }
ABlock getA(iterator variable) { return Ab_(variable - keys_.begin()); }
/** Return the dimension of the variable pointed to by the given key iterator
* todo: Remove this in favor of keeping track of dimensions with variables?
*/
size_t getDim(const_iterator variable) const { return Ab_(variable - keys_.begin()).size2(); }
/** Return the dimension of the variable pointed to by the given key iterator */
virtual size_t getDim(const_iterator variable) const = 0;
/**
* Permutes the GaussianFactor, but for efficiency requires the permutation
* to already be inverted. This acts just as a change-of-name for each
* variable. The order of the variables within the factor is not changed.
*/
void permuteWithInverse(const Permutation& inversePermutation);
/**
* Whiten the matrix and r.h.s. so that the noise model is unit diagonal.
* This throws an exception if the noise model cannot whiten, e.g. if it is
* constrained.
*/
GaussianFactor whiten() const;
/**
* Named constructor for combining a set of factors with pre-computed set of variables.
*/
static shared_ptr Combine(const FactorGraph<GaussianFactor>& factors, const VariableSlots& variableSlots);
virtual void permuteWithInverse(const Permutation& inversePermutation) = 0;
/**
* Combine and eliminate several factors.
*/
static std::pair<GaussianBayesNet::shared_ptr, shared_ptr> CombineAndEliminate(
const FactorGraph<GaussianFactor>& factors, size_t nrFrontals=1, SolveMethod solveMethod = SOLVE_QR);
protected:
/** Internal debug check to make sure variables are sorted */
void assertInvariants() const;
/** Internal helper function to extract conditionals from a factor that was
* just numerically eliminated.
*/
GaussianBayesNet::shared_ptr splitEliminatedFactor(size_t nrFrontals, const std::vector<Index>& keys);
public:
/** access the sigmas */
const Vector& get_sigmas() const { return model_->sigmas(); }
/** access the noise model */
const SharedDiagonal& get_model() const { return model_; }
/** access the noise model (non-const version) */
SharedDiagonal& get_model() { return model_; }
/** get the indices list */
const std::vector<size_t>& get_firstNonzeroBlocks() const { return firstNonzeroBlocks_; }
/** whether the noise model of this factor is constrained (i.e. contains any sigmas of 0.0) */
bool isConstrained() const {return model_->isConstrained();}
/**
* return the number of rows from the b vector
* @return an integer with the number of rows from the b vector
*/
size_t numberOfRows() const { return Ab_.size1(); }
/** Return A*x */
Vector operator*(const VectorValues& x) const;
/** x += A'*e */
void transposeMultiplyAdd(double alpha, const Vector& e, VectorValues& x) const;
/**
* Return (dense) matrix associated with factor
* @param ordering of variables needed for matrix column order
* @param set weight to true to bake in the weights
*/
std::pair<Matrix, Vector> matrix(bool weight = true) const;
/**
* Return (dense) matrix associated with factor
* The returned system is an augmented matrix: [A b]
* @param ordering of variables needed for matrix column order
* @param set weight to use whitening to bake in weights
*/
Matrix matrix_augmented(bool weight = true) const;
/**
* Return vectors i, j, and s to generate an m-by-n sparse matrix
* such that S(i(k),j(k)) = s(k), which can be given to MATLAB's sparse.
* As above, the standard deviations are baked into A and b
* @param first column index for each variable
*/
boost::tuple<std::list<int>, std::list<int>, std::list<double> >
sparse(const Dimensions& columnIndices) const;
/* ************************************************************************* */
// MUTABLE functions. FD:on the path to being eradicated
/* ************************************************************************* */
GaussianConditional::shared_ptr eliminateFirst(SolveMethod solveMethod = SOLVE_QR);
GaussianBayesNet::shared_ptr eliminate(size_t nrFrontals = 1, SolveMethod solveMethod = SOLVE_QR);
void set_firstNonzeroBlocks(size_t row, size_t varpos) { firstNonzeroBlocks_[row] = varpos; }
const FactorGraph<GaussianFactor>& factors, size_t nrFrontals=1, SolveMethod solveMethod=SOLVE_PREFER_CHOLESKY);
}; // GaussianFactor
/** unnormalized error */
template<class FACTOR>
double gaussianError(const FactorGraph<FACTOR>& fg, const VectorValues& x) {
double total_error = 0.;
BOOST_FOREACH(const typename FACTOR::shared_ptr& factor, fg) {
total_error += factor->error(x);
}
return total_error;
}
/** return A*x-b */
template<class FACTOR>
Errors gaussianErrors(const FactorGraph<FACTOR>& fg, const VectorValues& x) {
return *gaussianErrors_(fg, x);
}
/** shared pointer version */
template<class FACTOR>
boost::shared_ptr<Errors> gaussianErrors_(const FactorGraph<FACTOR>& fg, const VectorValues& x) {
boost::shared_ptr<Errors> e(new Errors);
BOOST_FOREACH(const typename FACTOR::shared_ptr& factor, fg) {
e->push_back(factor->error_vector(x));
}
return e;
}
} // namespace gtsam
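A small usage sketch of the free error functions above (illustrative only, not part of the commit):

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

double exampleError(const gtsam::GaussianFactorGraph& fg,
                    const gtsam::VectorValues& x) {
  // Sums factor->error(x), i.e. 0.5*(A*x-b)'*D*(A*x-b), over all factors;
  // this free function replaces the former GaussianFactorGraph::error()
  // member removed in this commit.
  return gtsam::gaussianError(fg, x);
}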

View File

@ -60,69 +60,6 @@ namespace gtsam {
}
}
/* ************************************************************************* */
double GaussianFactorGraph::error(const VectorValues& x) const {
double total_error = 0.;
BOOST_FOREACH(sharedFactor factor,factors_)
total_error += factor->error(x);
return total_error;
}
/* ************************************************************************* */
Errors GaussianFactorGraph::errors(const VectorValues& x) const {
return *errors_(x);
}
/* ************************************************************************* */
boost::shared_ptr<Errors> GaussianFactorGraph::errors_(const VectorValues& x) const {
boost::shared_ptr<Errors> e(new Errors);
BOOST_FOREACH(const sharedFactor& factor,factors_)
e->push_back(factor->error_vector(x));
return e;
}
/* ************************************************************************* */
Errors GaussianFactorGraph::operator*(const VectorValues& x) const {
Errors e;
BOOST_FOREACH(const sharedFactor& Ai,factors_)
e.push_back((*Ai)*x);
return e;
}
/* ************************************************************************* */
void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, Errors& e) const {
multiplyInPlace(x,e.begin());
}
/* ************************************************************************* */
void GaussianFactorGraph::multiplyInPlace(const VectorValues& x,
const Errors::iterator& e) const {
Errors::iterator ei = e;
BOOST_FOREACH(const sharedFactor& Ai,factors_) {
*ei = (*Ai)*x;
ei++;
}
}
/* ************************************************************************* */
// x += alpha*A'*e
void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e,
VectorValues& x) const {
// For each factor add the gradient contribution
Errors::const_iterator ei = e.begin();
BOOST_FOREACH(const sharedFactor& Ai,factors_)
Ai->transposeMultiplyAdd(alpha,*(ei++),x);
}
/* ************************************************************************* */
VectorValues GaussianFactorGraph::gradient(const VectorValues& x) const {
// It is crucial for performance to make a zero-valued clone of x
VectorValues g = VectorValues::zero(x);
transposeMultiplyAdd(1.0, errors(x), g);
return g;
}
/* ************************************************************************* */
void GaussianFactorGraph::combine(const GaussianFactorGraph &lfg){
for(const_iterator factor=lfg.factors_.begin(); factor!=lfg.factors_.end(); factor++){
@ -145,60 +82,29 @@ namespace gtsam {
return fg;
}
void GaussianFactorGraph::residual(const VectorValues &x, VectorValues &r) const {
getb(r) ;
VectorValues Ax = VectorValues::SameStructure(r) ;
multiply(x,Ax) ;
axpy(-1.0,Ax,r) ;
}
void GaussianFactorGraph::multiply(const VectorValues &x, VectorValues &r) const {
r.makeZero() ;
Index i = 0 ;
BOOST_FOREACH(const sharedFactor& factor, factors_) {
for(GaussianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
r[i] += prod(factor->getA(j), x[*j]);
}
++i ;
}
}
void GaussianFactorGraph::transposeMultiply(const VectorValues &r, VectorValues &x) const {
x.makeZero() ;
Index i = 0 ;
BOOST_FOREACH(const sharedFactor& factor, factors_) {
for(GaussianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
x[*j] += prod(trans(factor->getA(j)), r[i]) ;
}
++i ;
}
}
VectorValues GaussianFactorGraph::allocateVectorValuesb() const {
std::vector<size_t> dimensions(size()) ;
Index i = 0 ;
BOOST_FOREACH( const sharedFactor& factor, factors_) {
dimensions[i] = factor->numberOfRows() ;
i++;
}
return VectorValues(dimensions) ;
}
void GaussianFactorGraph::getb(VectorValues &b) const {
Index i = 0 ;
BOOST_FOREACH( const sharedFactor& factor, factors_) {
b[i] = factor->getb() ;
i++;
}
}
VectorValues GaussianFactorGraph::getb() const {
VectorValues b = allocateVectorValuesb() ;
getb(b) ;
return b ;
}
// VectorValues GaussianFactorGraph::allocateVectorValuesb() const {
// std::vector<size_t> dimensions(size()) ;
// Index i = 0 ;
// BOOST_FOREACH( const sharedFactor& factor, factors_) {
// dimensions[i] = factor->numberOfRows() ;
// i++;
// }
//
// return VectorValues(dimensions) ;
// }
//
// void GaussianFactorGraph::getb(VectorValues &b) const {
// Index i = 0 ;
// BOOST_FOREACH( const sharedFactor& factor, factors_) {
// b[i] = factor->getb();
// i++;
// }
// }
//
// VectorValues GaussianFactorGraph::getb() const {
// VectorValues b = allocateVectorValuesb() ;
// getb(b) ;
// return b ;
// }
} // namespace gtsam

View File

@ -24,7 +24,9 @@
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/Errors.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/SharedDiagonal.h>
namespace gtsam {
@ -55,28 +57,22 @@ namespace gtsam {
push_back(fg);
}
/* dummy constructor, to be compatible with conjugate gradient solver */
template<class DERIVEDFACTOR>
GaussianFactorGraph(const FactorGraph<DERIVEDFACTOR>& fg, const VectorValues &x0) {
push_back(fg);
}
/** Add a null factor */
void add(const Vector& b) {
push_back(sharedFactor(new GaussianFactor(b)));
push_back(sharedFactor(new JacobianFactor(b)));
}
/** Add a unary factor */
void add(Index key1, const Matrix& A1,
const Vector& b, const SharedDiagonal& model) {
push_back(sharedFactor(new GaussianFactor(key1,A1,b,model)));
push_back(sharedFactor(new JacobianFactor(key1,A1,b,model)));
}
/** Add a binary factor */
void add(Index key1, const Matrix& A1,
Index key2, const Matrix& A2,
const Vector& b, const SharedDiagonal& model) {
push_back(sharedFactor(new GaussianFactor(key1,A1,key2,A2,b,model)));
push_back(sharedFactor(new JacobianFactor(key1,A1,key2,A2,b,model)));
}
/** Add a ternary factor */
@ -84,13 +80,13 @@ namespace gtsam {
Index key2, const Matrix& A2,
Index key3, const Matrix& A3,
const Vector& b, const SharedDiagonal& model) {
push_back(sharedFactor(new GaussianFactor(key1,A1,key2,A2,key3,A3,b,model)));
push_back(sharedFactor(new JacobianFactor(key1,A1,key2,A2,key3,A3,b,model)));
}
/** Add an n-ary factor */
void add(const std::vector<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model) {
push_back(sharedFactor(new GaussianFactor(terms,b,model)));
push_back(sharedFactor(new JacobianFactor(terms,b,model)));
}
/**
@ -103,37 +99,9 @@ namespace gtsam {
/** Permute the variables in the factors */
void permuteWithInverse(const Permutation& inversePermutation);
/** return A*x-b */
Errors errors(const VectorValues& x) const;
/** shared pointer version */
boost::shared_ptr<Errors> errors_(const VectorValues& x) const;
/** unnormalized error */
double error(const VectorValues& x) const;
/** return A*x */
Errors operator*(const VectorValues& x) const;
/* In-place version e <- A*x that overwrites e. */
void multiplyInPlace(const VectorValues& x, Errors& e) const;
/* In-place version e <- A*x that takes an iterator. */
void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const;
/** x += alpha*A'*e */
void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& x) const;
/**
* Calculate the gradient of A'*(A*x-b) for a given config
* @param x: VectorValues specifying where to calculate gradient
* @return gradient, as a VectorValues as well
*/
VectorValues gradient(const VectorValues& x) const;
/** Unnormalized probability. O(n) */
double probPrime(const VectorValues& c) const {
return exp(-0.5 * error(c));
return exp(-0.5 * gaussianError(*this, c));
}
/**
@ -151,17 +119,12 @@ namespace gtsam {
*/
void combine(const GaussianFactorGraph &lfg);
// matrix-vector operations
void residual(const VectorValues &x, VectorValues &r) const ;
void multiply(const VectorValues &x, VectorValues &r) const ;
void transposeMultiply(const VectorValues &r, VectorValues &x) const ;
// get b
void getb(VectorValues &b) const ;
VectorValues getb() const ;
// allocate a vectorvalues of b's structure
VectorValues allocateVectorValuesb() const ;
};
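
A hedged usage sketch of the graph API declared above (example values hypothetical; eye/ones/zero are the dense helpers from gtsam/base):

#include <gtsam/linear/GaussianFactorGraph.h>
#include <vector>

using namespace gtsam;

int main() {
  GaussianFactorGraph fg;
  fg.add(0, eye(2), ones(2), noiseModel::Unit::Create(2));              // unary
  fg.add(0, -eye(2), 1, eye(2), zero(2), noiseModel::Unit::Create(2));  // binary

  std::vector<size_t> dims(2, 2);  // two 2-dimensional variables
  VectorValues x(dims);
  x.makeZero();

  double e = fg.error(x);           // unnormalized error 0.5*|Ax-b|^2
  VectorValues g = fg.gradient(x);  // A'*(A*x-b)
  return 0;
}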

View File

@ -65,22 +65,22 @@ namespace gtsam {
/* ************************************************************************* */
VectorValues GaussianJunctionTree::optimize() const {
tic("GJT optimize 1: eliminate");
tic(1, "GJT eliminate");
// eliminate from leaves to the root
boost::shared_ptr<const BayesTree::Clique> rootClique(this->eliminate());
toc("GJT optimize 1: eliminate");
toc(1, "GJT eliminate");
// Allocate solution vector
tic("GJT optimize 2: allocate VectorValues");
tic(2, "allocate VectorValues");
vector<size_t> dims(rootClique->back()->key() + 1, 0);
countDims(rootClique, dims);
VectorValues result(dims);
toc("GJT optimize 2: allocate VectorValues");
toc(2, "allocate VectorValues");
// back-substitution
tic("GJT optimize 3: back-substitute");
tic(3, "back-substitute");
btreeBackSubstitute(rootClique, result);
toc("GJT optimize 3: back-substitute");
toc(3, "back-substitute");
return result;
}
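
A usage sketch of optimize() (hypothetical example; assumes GaussianJunctionTree can be constructed directly from a GaussianFactorGraph):

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianJunctionTree.h>

using namespace gtsam;

int main() {
  GaussianFactorGraph fg;
  fg.add(0, eye(2), ones(2), noiseModel::Unit::Create(2));
  fg.add(0, -eye(2), 1, eye(2), zero(2), noiseModel::Unit::Create(2));

  GaussianJunctionTree gjt(fg);
  VectorValues x = gjt.optimize();  // eliminate, allocate, back-substitute
  return 0;
}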

View File

@ -68,7 +68,9 @@ GaussianFactor::shared_ptr GaussianMultifrontalSolver::marginalFactor(Index j) c
/* ************************************************************************* */
std::pair<Vector, Matrix> GaussianMultifrontalSolver::marginalCovariance(Index j) const {
GaussianConditional::shared_ptr conditional = Base::marginalFactor(j)->eliminateFirst();
FactorGraph<GaussianFactor> fg;
fg.push_back(Base::marginalFactor(j));
GaussianConditional::shared_ptr conditional = GaussianFactor::CombineAndEliminate(fg,1).first->front();
Matrix R = conditional->get_R();
return make_pair(conditional->get_d(), inverse(ublas::prod(ublas::trans(R), R)));
}

View File

@ -81,7 +81,9 @@ GaussianFactor::shared_ptr GaussianSequentialSolver::marginalFactor(Index j) con
}
std::pair<Vector, Matrix> GaussianSequentialSolver::marginalCovariance(Index j) const {
GaussianConditional::shared_ptr conditional = Base::marginalFactor(j)->eliminateFirst();
FactorGraph<GaussianFactor> fg;
fg.push_back(Base::marginalFactor(j));
GaussianConditional::shared_ptr conditional = GaussianFactor::CombineAndEliminate(fg,1).first->front();
Matrix R = conditional->get_R();
return make_pair(conditional->get_d(), inverse(ublas::prod(ublas::trans(R), R)));
}
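
Both marginalCovariance variants above recover the covariance from the conditional's square-root information matrix R as (R'R)^{-1}; a standalone sketch of that last step (function name hypothetical):

#include <gtsam/base/Matrix.h>

gtsam::Matrix covarianceFromR(const gtsam::Matrix& R) {
  namespace ublas = boost::numeric::ublas;
  // Sigma = (R' * R)^{-1}
  return gtsam::inverse(ublas::prod(ublas::trans(R), R));
}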

View File

@ -0,0 +1,407 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HessianFactor.cpp
* @brief Implementation of HessianFactor, a Gaussian factor stored as the full information matrix [A b]^T * [A b]
* @author Richard Roberts
* @created Dec 8, 2010
*/
#include <gtsam/base/timing.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/base/cholesky.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <boost/make_shared.hpp>
#include <boost/lambda/bind.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/symmetric.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/blas.hpp>
#include <sstream>
using namespace std;
namespace ublas = boost::numeric::ublas;
using namespace boost::lambda;
namespace gtsam {
/* ************************************************************************* */
HessianFactor::HessianFactor(const HessianFactor& gf) :
GaussianFactor(gf), info_(matrix_) {
info_.assignNoalias(gf.info_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor() : info_(matrix_) {}
/* ************************************************************************* */
HessianFactor::HessianFactor(const Vector& b_in) : info_(matrix_) {
JacobianFactor jf(b_in);
ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(Index i1, const Matrix& A1,
const Vector& b, const SharedDiagonal& model) :
GaussianFactor(i1), info_(matrix_) {
JacobianFactor jf(i1, A1, b, model);
ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2,
const Vector& b, const SharedDiagonal& model) :
GaussianFactor(i1,i2), info_(matrix_) {
JacobianFactor jf(i1, A1, i2, A2, b, model);
ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2,
Index i3, const Matrix& A3, const Vector& b, const SharedDiagonal& model) :
GaussianFactor(i1,i2,i3), info_(matrix_) {
JacobianFactor jf(i1, A1, i2, A2, i3, A3, b, model);
ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(const std::vector<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model) : info_(matrix_) {
JacobianFactor jf(terms, b, model);
keys_ = jf.keys_;
ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(const std::list<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model) : info_(matrix_) {
JacobianFactor jf(terms, b, model);
keys_ = jf.keys_;
ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(const GaussianConditional& cg) : GaussianFactor(cg), info_(matrix_) {
JacobianFactor jf(cg);
ublas::noalias(ublas::symmetric_adaptor<MatrixColMajor,ublas::upper>(matrix_)) =
ublas::prod(ublas::trans(jf.matrix_), jf.matrix_);
info_.copyStructureFrom(jf.Ab_);
}
/* ************************************************************************* */
HessianFactor::HessianFactor(const GaussianFactor& gf) : info_(matrix_) {
// Copy the variable indices
(GaussianFactor&)(*this) = gf;
// Copy the matrix data depending on what type of factor we're copying from
if(dynamic_cast<const JacobianFactor*>(&gf)) {
const JacobianFactor& jf(static_cast<const JacobianFactor&>(gf));
JacobianFactor whitened(jf.whiten());
matrix_ = ublas::prod(ublas::trans(whitened.matrix_), whitened.matrix_);
info_.copyStructureFrom(whitened.Ab_);
} else if(dynamic_cast<const HessianFactor*>(&gf)) {
const HessianFactor& hf(static_cast<const HessianFactor&>(gf));
info_.assignNoalias(hf.info_);
} else
throw std::invalid_argument("In HessianFactor(const GaussianFactor& gf), gf is neither a JacobianFactor nor a HessianFactor");
}
/* ************************************************************************* */
void HessianFactor::print(const std::string& s) const {
cout << s << "\n";
cout << " keys: ";
for(const_iterator key=this->begin(); key!=this->end(); ++key)
cout << *key << "(" << this->getDim(key) << ") ";
cout << "\n";
gtsam::print(ublas::symmetric_adaptor<const constBlock,ublas::upper>(
info_.range(0,info_.nBlocks(), 0,info_.nBlocks())), "Ab^T * Ab: ");
}
/* ************************************************************************* */
bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
if(!dynamic_cast<const HessianFactor*>(&lf))
return false;
else {
MatrixColMajor thisMatrix = ublas::symmetric_adaptor<const MatrixColMajor,ublas::upper>(this->info_.full());
thisMatrix(thisMatrix.size1()-1, thisMatrix.size2()-1) = 0.0;
MatrixColMajor rhsMatrix = ublas::symmetric_adaptor<const MatrixColMajor,ublas::upper>(static_cast<const HessianFactor&>(lf).info_.full());
rhsMatrix(rhsMatrix.size1()-1, rhsMatrix.size2()-1) = 0.0;
return equal_with_abs_tol(thisMatrix, rhsMatrix, tol);
}
}
/* ************************************************************************* */
double HessianFactor::error(const VectorValues& c) const {
return ublas::inner_prod(c.vector(), ublas::prod(info_.range(0, this->size(), 0, this->size()), c.vector())) -
2.0*ublas::inner_prod(c.vector(), info_.rangeColumn(0, this->size(), this->size(), 0));
}
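A standalone sketch (plain ublas; names hypothetical) of the quadratic form error() evaluates above, x'*G*x - 2*x'*g, with G the information block and g the information vector:

#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>

double quadraticForm(const boost::numeric::ublas::matrix<double>& G,
                     const boost::numeric::ublas::vector<double>& g,
                     const boost::numeric::ublas::vector<double>& x) {
  namespace ublas = boost::numeric::ublas;
  return ublas::inner_prod(x, ublas::prod(G, x)) - 2.0 * ublas::inner_prod(x, g);
}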
/* ************************************************************************* */
static FastMap<Index, SlotEntry> findScatterAndDims(const FactorGraph<HessianFactor>& factors) {
static const bool debug = false;
// The "scatter" is a map from global variable indices to slot indices in the
// union of involved variables. We also include the dimensionality of the
// variable.
Scatter scatter;
// First do the set union.
BOOST_FOREACH(const HessianFactor::shared_ptr& factor, factors) {
for(HessianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) {
scatter.insert(make_pair(*variable, SlotEntry(0, factor->getDim(variable))));
}
}
// Next fill in the slot indices (we can only get these after doing the set
// union).
size_t slot = 0;
BOOST_FOREACH(Scatter::value_type& var_slot, scatter) {
var_slot.second.slot = (slot ++);
if(debug)
cout << "scatter[" << var_slot.first << "] = (slot " << var_slot.second.slot << ", dim " << var_slot.second.dimension << ")" << endl;
}
return scatter;
}
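A hypothetical worked example (not in the source): two factors over variables {2,5} and {5,7}, each variable 3-dimensional, yield the union {2,5,7}, with slots assigned in sorted key order:

Scatter scatter;
scatter.insert(std::make_pair(Index(2), SlotEntry(0, 3)));  // variable 2 -> slot 0, dim 3
scatter.insert(std::make_pair(Index(5), SlotEntry(1, 3)));  // variable 5 -> slot 1, dim 3
scatter.insert(std::make_pair(Index(7), SlotEntry(2, 3)));  // variable 7 -> slot 2, dim 3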
///* ************************************************************************* */
//static MatrixColMajor formAbTAb(const FactorGraph<HessianFactor>& factors, const Scatter& scatter) {
//
// static const bool debug = false;
//
// tic("CombineAndEliminate: 3.1 varStarts");
// // Determine scalar indices of each variable
// vector<size_t> varStarts;
// varStarts.reserve(scatter.size() + 2);
// varStarts.push_back(0);
// BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) {
// varStarts.push_back(varStarts.back() + var_slot.second.dimension);
// }
// // This is for the r.h.s. vector
// varStarts.push_back(varStarts.back() + 1);
// toc("CombineAndEliminate: 3.1 varStarts");
//
// // Allocate and zero matrix for Ab' * Ab
// MatrixColMajor ATA(ublas::zero_matrix<double>(varStarts.back(), varStarts.back()));
//
// tic("CombineAndEliminate: 3.2 updates");
// // Do blockwise low-rank updates to Ab' * Ab for each factor. Here, we
// // only update the upper triangle because this is all that Cholesky uses.
// BOOST_FOREACH(const HessianFactor::shared_ptr& factor, factors) {
//
// // Whiten the factor first so it has a unit diagonal noise model
// HessianFactor whitenedFactor(factor->whiten());
//
// if(debug) whitenedFactor.print("whitened factor: ");
//
// for(HessianFactor::const_iterator var2 = whitenedFactor.begin(); var2 != whitenedFactor.end(); ++var2) {
// assert(scatter.find(*var2) != scatter.end());
// size_t vj = scatter.find(*var2)->second.slot;
// for(HessianFactor::const_iterator var1 = whitenedFactor.begin(); var1 <= var2; ++var1) {
// assert(scatter.find(*var1) != scatter.end());
// size_t vi = scatter.find(*var1)->second.slot;
// if(debug) cout << "Updating block " << vi << ", " << vj << endl;
// if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " <<
// varStarts[vj] << ":" << varStarts[vj+1] << ") from A" << *var1 << "' * A" << *var2 << endl;
// ublas::noalias(ublas::project(ATA,
// ublas::range(varStarts[vi], varStarts[vi+1]), ublas::range(varStarts[vj], varStarts[vj+1]))) +=
// ublas::prod(ublas::trans(whitenedFactor.getA(var1)), whitenedFactor.getA(var2));
// }
// }
//
// // Update r.h.s. vector
// size_t vj = scatter.size();
// for(HessianFactor::const_iterator var1 = whitenedFactor.begin(); var1 < whitenedFactor.end(); ++var1) {
// assert(scatter.find(*var1) != scatter.end());
// size_t vi = scatter.find(*var1)->second.slot;
// if(debug) cout << "Updating block " << vi << ", " << vj << endl;
// if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " <<
// varStarts[vj] << ":" << varStarts[vj+1] << ") from A" << *var1 << "' * b" << endl;
// ublas::matrix_column<MatrixColMajor> col(ATA, varStarts[vj]);
// ublas::noalias(ublas::subrange(col, varStarts[vi], varStarts[vi+1])) +=
// ublas::prod(ublas::trans(whitenedFactor.getA(var1)), whitenedFactor.getb());
// }
//
// size_t vi = scatter.size();
// if(debug) cout << "Updating block " << vi << ", " << vj << endl;
// if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " <<
// varStarts[vj] << ":" << varStarts[vj+1] << ") from b" << "' * b" << endl;
// ublas::noalias(ATA(varStarts[vi], varStarts[vj])) += ublas::inner_prod(whitenedFactor.getb(), whitenedFactor.getb());
// }
// toc("CombineAndEliminate: 3.2 updates");
//
// return ATA;
//}
/* ************************************************************************* */
void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatter) {
// This function updates 'combined' with the information in 'update'.
// 'scatter' maps variables in the update factor to slots in the combined
// factor.
static const bool debug = false;
// First build an array of slots
tic(1, "slots");
size_t slots[update.size()];
size_t slot = 0;
BOOST_FOREACH(Index j, update) {
slots[slot] = scatter.find(j)->second.slot;
++ slot;
}
toc(1, "slots");
// Apply updates to the upper triangle
tic(2, "update");
assert(this->info_.nBlocks() - 1 == scatter.size());
for(size_t j2=0; j2<update.info_.nBlocks(); ++j2) {
size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
for(size_t j1=0; j1<=j2; ++j1) {
size_t slot1 = (j1 == update.size()) ? this->info_.nBlocks()-1 : slots[j1];
if(debug)
cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
ublas::noalias(this->info_(slot1, slot2)) += update.info_(j1,j2);
}
}
toc(2, "update");
}
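A standalone sketch (plain ublas; names hypothetical) of the block accumulation updateATA performs once the slot offsets are known:

#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>

// combined(rowOff.., colOff..) += update, as one upper-triangle block update
void accumulateBlock(boost::numeric::ublas::matrix<double>& combined,
                     const boost::numeric::ublas::matrix<double>& update,
                     size_t rowOff, size_t colOff) {
  namespace ublas = boost::numeric::ublas;
  ublas::noalias(ublas::project(combined,
      ublas::range(rowOff, rowOff + update.size1()),
      ublas::range(colOff, colOff + update.size2()))) += update;
}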
/* ************************************************************************* */
GaussianBayesNet::shared_ptr HessianFactor::splitEliminatedFactor(size_t nrFrontals, const vector<Index>& keys) {
static const bool debug = false;
// Extract conditionals
tic(1, "extract conditionals");
GaussianBayesNet::shared_ptr conditionals(new GaussianBayesNet());
typedef VerticalBlockView<MatrixColMajor> BlockAb;
BlockAb Ab(matrix_, info_);
for(size_t j=0; j<nrFrontals; ++j) {
// Temporarily restrict the matrix view to the conditional blocks of the
// eliminated Ab_ matrix to create the GaussianConditional from it.
size_t varDim = Ab(0).size2();
Ab.rowEnd() = Ab.rowStart() + varDim;
// Zero the entries below the diagonal (this relies on the matrix being
// column-major).
{
tic(1, "zero");
BlockAb::Block remainingMatrix(Ab.range(0, Ab.nBlocks()));
if(remainingMatrix.size1() > 1)
for(size_t j = 0; j < remainingMatrix.size1() - 1; ++j)
memset(&remainingMatrix(j+1, j), 0, sizeof(remainingMatrix(0,0)) * (remainingMatrix.size1() - j - 1));
toc(1, "zero");
}
tic(2, "construct cond");
const ublas::scalar_vector<double> sigmas(varDim, 1.0);
conditionals->push_back(boost::make_shared<Conditional>(keys.begin()+j, keys.end(), 1, Ab, sigmas));
toc(2, "construct cond");
if(debug) conditionals->back()->print("Extracted conditional: ");
Ab.rowStart() += varDim;
Ab.firstBlock() += 1;
if(debug) cout << "rowStart = " << Ab.rowStart() << ", rowEnd = " << Ab.rowEnd() << endl;
}
toc(1, "extract conditionals");
// Take lower-right block of Ab_ to get the new factor
tic(2, "remaining factor");
info_.blockStart() = nrFrontals;
// Assign the keys
keys_.assign(keys.begin() + nrFrontals, keys.end());
toc(2, "remaining factor");
return conditionals;
}
/* ************************************************************************* */
pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> HessianFactor::CombineAndEliminate(
const FactorGraph<HessianFactor>& factors, size_t nrFrontals) {
static const bool debug = false;
// Find the scatter and variable dimensions
tic(1, "find scatter");
Scatter scatter(findScatterAndDims(factors));
toc(1, "find scatter");
// Pull out keys and dimensions
tic(2, "keys");
vector<Index> keys(scatter.size());
vector<size_t> dimensions(scatter.size() + 1);
BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) {
keys[var_slot.second.slot] = var_slot.first;
dimensions[var_slot.second.slot] = var_slot.second.dimension;
}
// This is for the r.h.s. vector
dimensions.back() = 1;
toc(2, "keys");
// Form Ab' * Ab
tic(3, "combine");
tic(1, "allocate");
HessianFactor::shared_ptr combinedFactor(new HessianFactor());
combinedFactor->info_.resize(dimensions.begin(), dimensions.end(), false);
toc(1, "allocate");
tic(2, "zero");
ublas::noalias(combinedFactor->matrix_) = ublas::zero_matrix<double>(combinedFactor->matrix_.size1(), combinedFactor->matrix_.size2());
toc(2, "zero");
tic(3, "update");
BOOST_FOREACH(const HessianFactor::shared_ptr& factor, factors) {
combinedFactor->updateATA(*factor, scatter);
}
toc(3, "update");
if(debug) gtsam::print(combinedFactor->matrix_, "Ab' * Ab: ");
toc(3, "combine");
// Do Cholesky, note that after this, the lower triangle still contains
// some untouched non-zeros that should be zero. We zero them while
// extracting submatrices next.
tic(4, "partial Cholesky");
choleskyPartial(combinedFactor->matrix_, combinedFactor->info_.offset(nrFrontals));
if(debug) gtsam::print(combinedFactor->matrix_, "chol(Ab' * Ab): ");
toc(4, "partial Cholesky");
// Extract conditionals and fill in details of the remaining factor
tic(5, "split");
GaussianBayesNet::shared_ptr conditionals(combinedFactor->splitEliminatedFactor(nrFrontals, keys));
if(debug) {
conditionals->print("Extracted conditionals: ");
combinedFactor->print("Eliminated factor (L piece): ");
}
toc(5, "split");
return make_pair(conditionals, combinedFactor);
}
}
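
A hedged usage sketch of CombineAndEliminate (hypothetical values; the include set is a guess at the minimal one):

#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/SharedDiagonal.h>

using namespace gtsam;

int main() {
  FactorGraph<HessianFactor> factors;
  factors.push_back(HessianFactor::shared_ptr(
      new HessianFactor(0, eye(2), ones(2), noiseModel::Unit::Create(2))));

  std::pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> result =
      HessianFactor::CombineAndEliminate(factors, 1);       // eliminate 1 frontal
  GaussianBayesNet::shared_ptr conditionals = result.first; // eliminated frontals
  HessianFactor::shared_ptr remaining = result.second;      // factor on the rest
  return 0;
}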

View File

@ -0,0 +1,133 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HessianFactor.h
* @brief A Gaussian factor stored as the full information matrix [A b]^T * [A b]
* @author Richard Roberts
* @created Dec 8, 2010
*/
#pragma once
#include <gtsam/base/blockMatrices.h>
#include <gtsam/linear/GaussianFactor.h>
// Forward declarations for friend unit tests
class ConversionConstructorHessianFactorTest;
namespace gtsam {
// Forward declarations
class JacobianFactor;
class SharedDiagonal;
// Definition of Scatter
struct SlotEntry {
size_t slot;
size_t dimension;
SlotEntry(size_t _slot, size_t _dimension) : slot(_slot), dimension(_dimension) {}
};
typedef FastMap<Index, SlotEntry> Scatter;
class HessianFactor : public GaussianFactor {
protected:
typedef MatrixColMajor InfoMatrix;
typedef SymmetricBlockView<InfoMatrix> BlockInfo;
InfoMatrix matrix_; // The full information matrix [A b]^T * [A b]
BlockInfo info_; // The block view of the full information matrix.
GaussianBayesNet::shared_ptr splitEliminatedFactor(size_t nrFrontals, const std::vector<Index>& keys);
void updateATA(const HessianFactor& update, const Scatter& scatter);
public:
typedef boost::shared_ptr<HessianFactor> shared_ptr;
typedef BlockInfo::Block Block;
typedef BlockInfo::constBlock constBlock;
/** Copy constructor */
HessianFactor(const HessianFactor& gf);
/** default constructor for I/O */
HessianFactor();
/** Construct Null factor */
HessianFactor(const Vector& b_in);
/** Construct unary factor */
HessianFactor(Index i1, const Matrix& A1,
const Vector& b, const SharedDiagonal& model);
/** Construct binary factor */
HessianFactor(Index i1, const Matrix& A1,
Index i2, const Matrix& A2,
const Vector& b, const SharedDiagonal& model);
/** Construct ternary factor */
HessianFactor(Index i1, const Matrix& A1, Index i2,
const Matrix& A2, Index i3, const Matrix& A3,
const Vector& b, const SharedDiagonal& model);
/** Construct an n-ary factor */
HessianFactor(const std::vector<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model);
HessianFactor(const std::list<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model);
/** Construct from Conditional Gaussian */
HessianFactor(const GaussianConditional& cg);
/** Convert from a JacobianFactor or HessianFactor (computes A^T * A) */
HessianFactor(const GaussianFactor& factor);
virtual ~HessianFactor() {}
// Implementing Testable interface
virtual void print(const std::string& s = "") const;
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const;
virtual double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */
/** Return the dimension of the variable pointed to by the given key iterator
* todo: Remove this in favor of keeping track of dimensions with variables?
*/
virtual size_t getDim(const_iterator variable) const { return info_(variable-this->begin(), 0).size1(); }
/** Return a view of a block of the information matrix */
constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); }
/**
* Permutes the GaussianFactor, but for efficiency requires the permutation
* to already be inverted. This acts just as a change-of-name for each
* variable. The order of the variables within the factor is not changed.
*/
virtual void permuteWithInverse(const Permutation& inversePermutation) {
FactorBase<Index>::permuteWithInverse(inversePermutation); }
/**
* Combine and eliminate several factors.
*/
static std::pair<GaussianBayesNet::shared_ptr, shared_ptr> CombineAndEliminate(
const FactorGraph<HessianFactor>& factors, size_t nrFrontals=1);
// Friend unit test classes
friend class ::ConversionConstructorHessianFactorTest;
// Friend JacobianFactor for conversion
friend class JacobianFactor;
};
}
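
A round-trip sketch of the two conversion constructors declared above (hypothetical values):

#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/HessianFactor.h>

using namespace gtsam;

int main() {
  JacobianFactor jf(0, eye(2), ones(2), noiseModel::Unit::Create(2));
  HessianFactor hf(jf);     // whitens, then forms [A b]' * [A b]
  JacobianFactor back(hf);  // careful Cholesky back to Jacobian form
  return 0;
}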

View File

@ -0,0 +1,733 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file JacobianFactor.cpp
* @brief Implementation of JacobianFactor, a Gaussian factor stored as the augmented matrix [A b] with a diagonal noise model
* @author Richard Roberts
* @created Dec 8, 2010
*/
#include <gtsam/base/timing.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/base/cholesky.h>
#include <gtsam/inference/VariableSlots.h>
#include <gtsam/inference/FactorGraph-inl.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <boost/make_shared.hpp>
#include <boost/lambda/bind.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <boost/numeric/ublas/blas.hpp>
#include <sstream>
#include <stdexcept>
using namespace std;
namespace ublas = boost::numeric::ublas;
using namespace boost::lambda;
namespace gtsam {
/* ************************************************************************* */
inline void JacobianFactor::assertInvariants() const {
#ifndef NDEBUG
IndexFactor::assertInvariants();
assert((keys_.size() == 0 && Ab_.size1() == 0 && Ab_.nBlocks() == 0) || keys_.size()+1 == Ab_.nBlocks());
assert(firstNonzeroBlocks_.size() == Ab_.size1());
for(size_t i=0; i<firstNonzeroBlocks_.size(); ++i)
assert(firstNonzeroBlocks_[i] < Ab_.nBlocks());
#endif
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const JacobianFactor& gf) :
GaussianFactor(gf), model_(gf.model_), firstNonzeroBlocks_(gf.firstNonzeroBlocks_), Ab_(matrix_) {
Ab_.assignNoalias(gf.Ab_);
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor() : Ab_(matrix_) { assertInvariants(); }
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const Vector& b_in) : firstNonzeroBlocks_(b_in.size(), 0), Ab_(matrix_) {
size_t dims[] = { 1 };
Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+1, b_in.size()));
getb() = b_in;
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(Index i1, const Matrix& A1,
const Vector& b, const SharedDiagonal& model) :
GaussianFactor(i1), model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) {
size_t dims[] = { A1.size2(), 1};
Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+2, b.size()));
Ab_(0) = A1;
getb() = b;
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2,
const Vector& b, const SharedDiagonal& model) :
GaussianFactor(i1,i2), model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) {
size_t dims[] = { A1.size2(), A2.size2(), 1};
Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+3, b.size()));
Ab_(0) = A1;
Ab_(1) = A2;
getb() = b;
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2,
Index i3, const Matrix& A3, const Vector& b, const SharedDiagonal& model) :
GaussianFactor(i1,i2,i3), model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) {
size_t dims[] = { A1.size2(), A2.size2(), A3.size2(), 1};
Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+4, b.size()));
Ab_(0) = A1;
Ab_(1) = A2;
Ab_(2) = A3;
getb() = b;
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const std::vector<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model) :
model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) {
keys_.resize(terms.size());
size_t dims[terms.size()+1];
for(size_t j=0; j<terms.size(); ++j) {
keys_[j] = terms[j].first;
dims[j] = terms[j].second.size2();
}
dims[terms.size()] = 1;
Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+terms.size()+1, b.size()));
for(size_t j=0; j<terms.size(); ++j)
Ab_(j) = terms[j].second;
getb() = b;
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const std::list<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model) :
model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) {
keys_.resize(terms.size());
size_t dims[terms.size()+1];
size_t j=0;
for(std::list<std::pair<Index, Matrix> >::const_iterator term=terms.begin(); term!=terms.end(); ++term) {
keys_[j] = term->first;
dims[j] = term->second.size2();
++ j;
}
dims[j] = 1;
firstNonzeroBlocks_.resize(b.size(), 0);
Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+terms.size()+1, b.size()));
j = 0;
for(std::list<std::pair<Index, Matrix> >::const_iterator term=terms.begin(); term!=terms.end(); ++term) {
Ab_(j) = term->second;
++ j;
}
getb() = b;
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianConditional& cg) : GaussianFactor(cg), model_(noiseModel::Diagonal::Sigmas(cg.get_sigmas(), true)), Ab_(matrix_) {
Ab_.assignNoalias(cg.rsd_);
// todo SL: make firstNonzeroCols triangular?
firstNonzeroBlocks_.resize(cg.get_d().size(), 0);
assertInvariants();
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const HessianFactor& factor) : Ab_(matrix_) {
keys_ = factor.keys_;
Ab_.assignNoalias(factor.info_);
size_t maxrank = choleskyCareful(matrix_);
Ab_.rowEnd() = maxrank;
model_ = noiseModel::Unit::Create(maxrank);
size_t varpos = 0;
firstNonzeroBlocks_.resize(this->size1());
for(size_t row=0; row<this->size1(); ++row) {
while(varpos < this->keys_.size() && Ab_.offset(varpos+1) <= row)
++ varpos;
firstNonzeroBlocks_[row] = varpos;
}
assertInvariants();
}
/* ************************************************************************* */
void JacobianFactor::print(const string& s) const {
cout << s << "\n";
if (empty()) {
cout << " empty, keys: ";
BOOST_FOREACH(const Index key, keys_) { cout << key << " "; }
cout << endl;
} else {
for(const_iterator key=begin(); key!=end(); ++key)
gtsam::print(getA(key), (boost::format("A[%1%]=\n")%*key).str());
gtsam::print(getb(),"b=");
model_->print("model");
}
}
/* ************************************************************************* */
// Check if two linear factors are equal
bool JacobianFactor::equals(const GaussianFactor& f_, double tol) const {
if(!dynamic_cast<const JacobianFactor*>(&f_))
return false;
else {
const JacobianFactor& f(static_cast<const JacobianFactor&>(f_));
if (empty()) return (f.empty());
if(keys_!=f.keys_ /*|| !model_->equals(lf->model_, tol)*/)
return false;
assert(Ab_.size1() == f.Ab_.size1() && Ab_.size2() == f.Ab_.size2());
constABlock Ab1(Ab_.range(0, Ab_.nBlocks()));
constABlock Ab2(f.Ab_.range(0, f.Ab_.nBlocks()));
for(size_t row=0; row<Ab1.size1(); ++row)
if(!equal_with_abs_tol(ublas::row(Ab1, row), ublas::row(Ab2, row), tol) &&
!equal_with_abs_tol(-ublas::row(Ab1, row), ublas::row(Ab2, row), tol))
return false;
return true;
}
}
/* ************************************************************************* */
void JacobianFactor::permuteWithInverse(const Permutation& inversePermutation) {
// Build a map from the new variable indices to the old slot positions.
typedef map<size_t, size_t, std::less<size_t>, boost::fast_pool_allocator<std::pair<const size_t, size_t> > > SourceSlots;
SourceSlots sourceSlots;
for(size_t j=0; j<keys_.size(); ++j)
sourceSlots.insert(make_pair(inversePermutation[keys_[j]], j));
// Build a vector of variable dimensions in the new order
vector<size_t> dimensions(keys_.size() + 1);
size_t j = 0;
BOOST_FOREACH(const SourceSlots::value_type& sourceSlot, sourceSlots) {
dimensions[j++] = Ab_(sourceSlot.second).size2();
}
assert(j == keys_.size());
dimensions.back() = 1;
// Copy the variables and matrix into the new order
vector<Index> oldKeys(keys_.size());
keys_.swap(oldKeys);
AbMatrix oldMatrix;
BlockAb oldAb(oldMatrix, dimensions.begin(), dimensions.end(), Ab_.size1());
Ab_.swap(oldAb);
j = 0;
BOOST_FOREACH(const SourceSlots::value_type& sourceSlot, sourceSlots) {
keys_[j] = sourceSlot.first;
ublas::noalias(Ab_(j++)) = oldAb(sourceSlot.second);
}
ublas::noalias(Ab_(j)) = oldAb(j);
// Since we're permuting the variables, ensure that entire rows from this
// factor are copied when Combine is called
BOOST_FOREACH(size_t& varpos, firstNonzeroBlocks_) { varpos = 0; }
assertInvariants();
}
/* ************************************************************************* */
Vector JacobianFactor::unweighted_error(const VectorValues& c) const {
Vector e = -getb();
if (empty()) return e;
for(size_t pos=0; pos<keys_.size(); ++pos)
e += ublas::prod(Ab_(pos), c[keys_[pos]]);
return e;
}
/* ************************************************************************* */
Vector JacobianFactor::error_vector(const VectorValues& c) const {
if (empty()) return model_->whiten(-getb());
return model_->whiten(unweighted_error(c));
}
/* ************************************************************************* */
double JacobianFactor::error(const VectorValues& c) const {
if (empty()) return 0;
Vector weighted = error_vector(c);
return 0.5 * inner_prod(weighted,weighted);
}
/* ************************************************************************* */
Vector JacobianFactor::operator*(const VectorValues& x) const {
Vector Ax = zero(Ab_.size1());
if (empty()) return Ax;
// Just iterate over all A matrices and multiply in correct config part
for(size_t pos=0; pos<keys_.size(); ++pos)
Ax += ublas::prod(Ab_(pos), x[keys_[pos]]);
return model_->whiten(Ax);
}
/* ************************************************************************* */
void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
VectorValues& x) const {
Vector E = alpha * model_->whiten(e);
// Just iterate over all A matrices and insert Ai^e into VectorValues
for(size_t pos=0; pos<keys_.size(); ++pos)
gtsam::transposeMultiplyAdd(1.0, Ab_(pos), E, x[keys_[pos]]);
}
/* ************************************************************************* */
pair<Matrix,Vector> JacobianFactor::matrix(bool weight) const {
Matrix A(Ab_.range(0, keys_.size()));
Vector b(getb());
// divide in sigma so error is indeed 0.5*|Ax-b|
if (weight) model_->WhitenSystem(A,b);
return make_pair(A, b);
}
/* ************************************************************************* */
Matrix JacobianFactor::matrix_augmented(bool weight) const {
if (weight) { Matrix Ab(Ab_.range(0,Ab_.nBlocks())); model_->WhitenInPlace(Ab); return Ab; }
else return Ab_.range(0, Ab_.nBlocks());
}
/* ************************************************************************* */
boost::tuple<list<int>, list<int>, list<double> >
JacobianFactor::sparse(const map<Index,size_t>& columnIndices) const {
// declare return values
list<int> I,J;
list<double> S;
// iterate over all matrices in the factor
for(size_t pos=0; pos<keys_.size(); ++pos) {
constABlock A(Ab_(pos));
// find first column index for this key
int column_start = columnIndices.at(keys_[pos]);
for (size_t i = 0; i < A.size1(); i++) {
double sigma_i = model_->sigma(i);
for (size_t j = 0; j < A.size2(); j++)
if (A(i, j) != 0.0) {
I.push_back(i + 1);
J.push_back(j + column_start);
S.push_back(A(i, j) / sigma_i);
}
}
}
// return the result
return boost::tuple<list<int>, list<int>, list<double> >(I,J,S);
}
/* ************************************************************************* */
JacobianFactor JacobianFactor::whiten() const {
JacobianFactor result(*this);
result.model_->WhitenInPlace(result.matrix_);
result.model_ = noiseModel::Unit::Create(result.model_->dim());
return result;
}
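A small sketch of the invariant whiten() is meant to preserve (assumption: any factor/value pair with matching dimensions; function name hypothetical):

#include <gtsam/linear/JacobianFactor.h>
#include <cmath>

bool whitenPreservesError(const gtsam::JacobianFactor& jf, const gtsam::VectorValues& x) {
  gtsam::JacobianFactor wf = jf.whiten();  // unit-sigma copy of the factor
  return std::fabs(jf.error(x) - wf.error(x)) < 1e-9;
}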
/* ************************************************************************* */
GaussianConditional::shared_ptr JacobianFactor::eliminateFirst() {
return this->eliminate(1)->front();
}
/* ************************************************************************* */
GaussianBayesNet::shared_ptr JacobianFactor::eliminate(size_t nrFrontals) {
assert(Ab_.rowStart() == 0 && Ab_.rowEnd() == matrix_.size1() && Ab_.firstBlock() == 0);
assert(keys_.size() >= nrFrontals);
assertInvariants();
static const bool debug = false;
if(debug) cout << "Eliminating " << nrFrontals << " frontal variables" << endl;
if(debug) this->print("Eliminating JacobianFactor: ");
tic(1, "stairs");
// Translate the left-most nonzero column indices into top-most zero row indices
vector<int> firstZeroRows(Ab_.size2());
{
size_t lastNonzeroRow = 0;
vector<int>::iterator firstZeroRowsIt = firstZeroRows.begin();
for(size_t var=0; var<keys().size(); ++var) {
while(lastNonzeroRow < this->size1() && firstNonzeroBlocks_[lastNonzeroRow] <= var)
++ lastNonzeroRow;
fill(firstZeroRowsIt, firstZeroRowsIt+Ab_(var).size2(), lastNonzeroRow);
firstZeroRowsIt += Ab_(var).size2();
}
assert(firstZeroRowsIt+1 == firstZeroRows.end());
*firstZeroRowsIt = this->size1();
}
toc(1, "stairs");
#ifndef NDEBUG
for(size_t col=0; col<Ab_.size2(); ++col) {
if(debug) cout << "Staircase[" << col << "] = " << firstZeroRows[col] << endl;
if(col != 0) assert(firstZeroRows[col] >= firstZeroRows[col-1]);
assert(firstZeroRows[col] <= (long)this->size1());
}
#endif
if(debug) gtsam::print(matrix_, "Augmented Ab: ");
size_t frontalDim = Ab_.range(0,nrFrontals).size2();
if(debug) cout << "frontalDim = " << frontalDim << endl;
// Use in-place QR or Cholesky on dense Ab appropriate to NoiseModel
tic(2, "QR");
SharedDiagonal noiseModel = model_->QRColumnWise(matrix_, firstZeroRows);
toc(2, "QR");
// Zero the lower-left triangle. todo: not all of these entries actually
// need to be zeroed if we are careful to start copying rows after the last
// structural zero.
if(matrix_.size1() > 0) {
for(size_t j=0; j<matrix_.size2(); ++j)
for(size_t i=j+1; i<noiseModel->dim(); ++i)
matrix_(i,j) = 0.0;
}
if(debug) gtsam::print(matrix_, "QR result: ");
if(debug) noiseModel->print("QR result noise model: ");
// Check for singular factor
if(noiseModel->dim() < frontalDim) {
throw domain_error((boost::format(
"JacobianFactor is singular in variable %1%, discovered while attempting\n"
"to eliminate this variable.") % keys_.front()).str());
}
// Extract conditionals
tic(3, "cond Rd");
GaussianBayesNet::shared_ptr conditionals(new GaussianBayesNet());
for(size_t j=0; j<nrFrontals; ++j) {
// Temporarily restrict the matrix view to the conditional blocks of the
// eliminated Ab matrix to create the GaussianConditional from it.
size_t varDim = Ab_(0).size2();
Ab_.rowEnd() = Ab_.rowStart() + varDim;
const ublas::vector_range<const Vector> sigmas(noiseModel->sigmas(), ublas::range(Ab_.rowStart(), Ab_.rowEnd()));
conditionals->push_back(boost::make_shared<Conditional>(keys_.begin()+j, keys_.end(), 1, Ab_, sigmas));
if(debug) conditionals->back()->print("Extracted conditional: ");
Ab_.rowStart() += varDim;
Ab_.firstBlock() += 1;
}
toc(3, "cond Rd");
if(debug) conditionals->print("Extracted conditionals: ");
tic(4, "remaining factor");
// Take lower-right block of Ab to get the new factor
Ab_.rowEnd() = noiseModel->dim();
keys_.assign(keys_.begin() + nrFrontals, keys_.end());
// Set sigmas with the right model
if (noiseModel->isConstrained())
model_ = noiseModel::Constrained::MixedSigmas(sub(noiseModel->sigmas(), frontalDim, noiseModel->dim()));
else
model_ = noiseModel::Diagonal::Sigmas(sub(noiseModel->sigmas(), frontalDim, noiseModel->dim()));
if(debug) this->print("Eliminated factor: ");
assert(Ab_.size1() <= Ab_.size2()-1);
toc(4, "remaining factor");
// todo SL: deal with "dead" pivot columns!!!
tic(5, "rowstarts");
size_t varpos = 0;
firstNonzeroBlocks_.resize(this->size1());
for(size_t row=0; row<size1(); ++row) {
if(debug) cout << "row " << row << " varpos " << varpos << " Ab_.offset(varpos)=" << Ab_.offset(varpos) << " Ab_.offset(varpos+1)=" << Ab_.offset(varpos+1) << endl;
while(varpos < this->keys_.size() && Ab_.offset(varpos+1) <= row)
++ varpos;
firstNonzeroBlocks_[row] = varpos;
if(debug) cout << "firstNonzeroVars_[" << row << "] = " << firstNonzeroBlocks_[row] << endl;
}
toc(5, "rowstarts");
if(debug) print("Eliminated factor: ");
assertInvariants();
return conditionals;
}
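A usage sketch of in-place elimination (hypothetical values):

#include <gtsam/linear/JacobianFactor.h>

using namespace gtsam;

int main() {
  JacobianFactor jf(0, eye(2), 1, eye(2), ones(2), noiseModel::Unit::Create(2));
  GaussianBayesNet::shared_ptr bn = jf.eliminate(1);  // conditional P(x0 | x1)
  // jf is now the remaining factor on variable 1 only
  return 0;
}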
/* ************************************************************************* */
/* Used internally by JacobianFactor::Combine for sorting */
struct _RowSource {
size_t firstNonzeroVar;
size_t factorI;
size_t factorRowI;
_RowSource(size_t _firstNonzeroVar, size_t _factorI, size_t _factorRowI) :
firstNonzeroVar(_firstNonzeroVar), factorI(_factorI), factorRowI(_factorRowI) {}
bool operator<(const _RowSource& o) const { return firstNonzeroVar < o.firstNonzeroVar; }
};
/* ************************************************************************* */
// Helper functions for Combine
static boost::tuple<vector<size_t>, size_t, size_t> countDims(const std::vector<JacobianFactor::shared_ptr>& factors, const VariableSlots& variableSlots) {
#ifndef NDEBUG
vector<size_t> varDims(variableSlots.size(), numeric_limits<size_t>::max());
#else
vector<size_t> varDims(variableSlots.size());
#endif
size_t m = 0;
size_t n = 0;
{
Index jointVarpos = 0;
BOOST_FOREACH(const VariableSlots::value_type& slots, variableSlots) {
assert(slots.second.size() == factors.size());
Index sourceFactorI = 0;
BOOST_FOREACH(const Index sourceVarpos, slots.second) {
if(sourceVarpos < numeric_limits<Index>::max()) {
const JacobianFactor& sourceFactor = *factors[sourceFactorI];
size_t vardim = sourceFactor.getDim(sourceFactor.begin() + sourceVarpos);
#ifndef NDEBUG
if(varDims[jointVarpos] == numeric_limits<size_t>::max()) {
varDims[jointVarpos] = vardim;
n += vardim;
} else
assert(varDims[jointVarpos] == vardim);
#else
varDims[jointVarpos] = vardim;
n += vardim;
break;
#endif
}
++ sourceFactorI;
}
++ jointVarpos;
}
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, factors) {
m += factor->size1();
}
}
return boost::make_tuple(varDims, m, n);
}
/* ************************************************************************* */
JacobianFactor::shared_ptr JacobianFactor::Combine(const FactorGraph<JacobianFactor>& factors, const VariableSlots& variableSlots) {
static const bool debug = false;
if(debug) factors.print("Combining factors: ");
if(debug) variableSlots.print();
// Determine dimensions
tic(1, "countDims");
vector<size_t> varDims;
size_t m;
size_t n;
boost::tie(varDims, m, n) = countDims(factors, variableSlots);
if(debug) {
cout << "Dims: " << m << " x " << n << "\n";
BOOST_FOREACH(const size_t dim, varDims) { cout << dim << " "; }
cout << endl;
}
toc(1, "countDims");
// Sort rows
tic(2, "sort rows");
vector<_RowSource> rowSources; rowSources.reserve(m);
bool anyConstrained = false;
for(size_t sourceFactorI = 0; sourceFactorI < factors.size(); ++sourceFactorI) {
const JacobianFactor& sourceFactor(*factors[sourceFactorI]);
sourceFactor.assertInvariants();
for(size_t sourceFactorRow = 0; sourceFactorRow < sourceFactor.size1(); ++sourceFactorRow) {
Index firstNonzeroVar;
if(sourceFactor.firstNonzeroBlocks_[sourceFactorRow] < sourceFactor.size())
firstNonzeroVar = sourceFactor.keys_[sourceFactor.firstNonzeroBlocks_[sourceFactorRow]];
else if(sourceFactor.firstNonzeroBlocks_[sourceFactorRow] == sourceFactor.size())
firstNonzeroVar = sourceFactor.back() + 1;
else
assert(false);
rowSources.push_back(_RowSource(firstNonzeroVar, sourceFactorI, sourceFactorRow));
}
if(sourceFactor.model_->isConstrained()) anyConstrained = true;
}
assert(rowSources.size() == m);
std::sort(rowSources.begin(), rowSources.end());
toc(2, "sort rows");
// Allocate new factor
tic(3, "allocate");
shared_ptr combined(new JacobianFactor());
combined->keys_.resize(variableSlots.size());
std::transform(variableSlots.begin(), variableSlots.end(), combined->keys_.begin(), bind(&VariableSlots::const_iterator::value_type::first, boost::lambda::_1));
varDims.push_back(1);
combined->Ab_.copyStructureFrom(BlockAb(combined->matrix_, varDims.begin(), varDims.end(), m));
combined->firstNonzeroBlocks_.resize(m);
Vector sigmas(m);
toc(3, "allocate");
// Copy rows
tic(4, "copy rows");
Index combinedSlot = 0;
BOOST_FOREACH(const VariableSlots::value_type& varslot, variableSlots) {
for(size_t row = 0; row < m; ++row) {
const Index sourceSlot = varslot.second[rowSources[row].factorI];
ABlock combinedBlock(combined->Ab_(combinedSlot));
if(sourceSlot != numeric_limits<Index>::max()) {
const JacobianFactor& source(*factors[rowSources[row].factorI]);
const size_t sourceRow = rowSources[row].factorRowI;
if(source.firstNonzeroBlocks_[sourceRow] <= sourceSlot) {
const constABlock sourceBlock(source.Ab_(sourceSlot));
ublas::noalias(ublas::row(combinedBlock, row)) = ublas::row(sourceBlock, sourceRow);
} else
ublas::noalias(ublas::row(combinedBlock, row)) = ublas::zero_vector<double>(combinedBlock.size2());
} else
ublas::noalias(ublas::row(combinedBlock, row)) = ublas::zero_vector<double>(combinedBlock.size2());
}
++ combinedSlot;
}
toc(4, "copy rows");
// Copy rhs (b), sigma, and firstNonzeroBlocks
tic(5, "copy vectors");
Index firstNonzeroSlot = 0;
for(size_t row = 0; row < m; ++row) {
const JacobianFactor& source(*factors[rowSources[row].factorI]);
const size_t sourceRow = rowSources[row].factorRowI;
combined->getb()(row) = source.getb()(sourceRow);
sigmas(row) = source.get_model()->sigmas()(sourceRow);
while(firstNonzeroSlot < variableSlots.size() && rowSources[row].firstNonzeroVar > combined->keys_[firstNonzeroSlot])
++ firstNonzeroSlot;
combined->firstNonzeroBlocks_[row] = firstNonzeroSlot;
}
toc(5, "copy vectors");
// Create noise model from sigmas
tic(6, "noise model");
if(anyConstrained) combined->model_ = noiseModel::Constrained::MixedSigmas(sigmas);
else combined->model_ = noiseModel::Diagonal::Sigmas(sigmas);
toc(6, "noise model");
combined->assertInvariants();
return combined;
}
/* ************************************************************************* */
pair<GaussianBayesNet::shared_ptr, JacobianFactor::shared_ptr> JacobianFactor::CombineAndEliminate(
const FactorGraph<JacobianFactor>& factors, size_t nrFrontals) {
tic(1, "Combine");
shared_ptr jointFactor(Combine(factors, VariableSlots(factors)));
toc(1, "Combine");
tic(2, "eliminate");
GaussianBayesNet::shared_ptr gbn(jointFactor->eliminate(nrFrontals));
toc(2, "eliminate");
return make_pair(gbn, jointFactor);
}
/* ************************************************************************* */
Errors operator*(const FactorGraph<JacobianFactor>& fg, const VectorValues& x) {
Errors e;
BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) {
e.push_back((*Ai)*x);
}
return e;
}
/* ************************************************************************* */
void multiplyInPlace(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, Errors& e) {
multiplyInPlace(fg,x,e.begin());
}
/* ************************************************************************* */
void multiplyInPlace(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const Errors::iterator& e) {
Errors::iterator ei = e;
BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) {
*ei = (*Ai)*x;
ei++;
}
}
/* ************************************************************************* */
// x += alpha*A'*e
void transposeMultiplyAdd(const FactorGraph<JacobianFactor>& fg, double alpha, const Errors& e, VectorValues& x) {
// For each factor add the gradient contribution
Errors::const_iterator ei = e.begin();
BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) {
Ai->transposeMultiplyAdd(alpha,*(ei++),x);
}
}
/* ************************************************************************* */
VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x) {
// It is crucial for performance to make a zero-valued clone of x
VectorValues g = VectorValues::zero(x);
Errors e;
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
e.push_back(factor->error_vector(x));
}
transposeMultiplyAdd(fg, 1.0, e, g);
return g;
}
/* ************************************************************************* */
void residual(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r) {
Index i = 0 ;
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
r[i] = factor->getb();
i++;
}
VectorValues Ax = VectorValues::SameStructure(r);
multiply(fg,x,Ax);
axpy(-1.0,Ax,r);
}
/* ************************************************************************* */
void multiply(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r) {
r.makeZero();
Index i = 0;
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
r[i] += prod(factor->getA(j), x[*j]);
}
++i;
}
}
/* ************************************************************************* */
void transposeMultiply(const FactorGraph<JacobianFactor>& fg, const VectorValues &r, VectorValues &x) {
x.makeZero();
Index i = 0;
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
x[*j] += prod(trans(factor->getA(j)), r[i]);
}
++i;
}
}
}
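
A usage sketch of the free functions defined above (hypothetical values):

#include <gtsam/linear/JacobianFactor.h>
#include <vector>

using namespace gtsam;

int main() {
  FactorGraph<JacobianFactor> fg;
  fg.push_back(JacobianFactor::shared_ptr(
      new JacobianFactor(0, eye(2), ones(2), noiseModel::Unit::Create(2))));

  std::vector<size_t> dims(1, 2);  // one 2-dimensional variable
  VectorValues x(dims);
  x.makeZero();

  Errors e = fg * x;                 // whitened A*x, one block per factor
  VectorValues g = gradient(fg, x);  // A'*(A*x-b)
  return 0;
}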

View File

@ -0,0 +1,238 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file JacobianFactor.h
* @brief A Gaussian factor stored as the augmented matrix [A b] with a diagonal noise model
* @author Richard Roberts
* @created Dec 8, 2010
*/
#pragma once
#include <gtsam/base/types.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/base/Vector.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/SharedDiagonal.h>
#include <gtsam/linear/Errors.h>
#include <map>
#include <boost/tuple/tuple.hpp>
// Forward declarations of friend unit tests
class Combine2GaussianFactorTest;
class eliminateFrontalsGaussianFactorTest;
namespace gtsam {
// Forward declarations
class HessianFactor;
class VariableSlots;
class JacobianFactor : public GaussianFactor {
protected:
typedef MatrixColMajor AbMatrix;
typedef VerticalBlockView<AbMatrix> BlockAb;
SharedDiagonal model_; // Gaussian noise model with diagonal covariance matrix
std::vector<size_t> firstNonzeroBlocks_;
AbMatrix matrix_; // the full matrix corresponding to the factor
BlockAb Ab_; // the block view of the full matrix
public:
typedef boost::shared_ptr<JacobianFactor> shared_ptr;
typedef BlockAb::Block ABlock;
typedef BlockAb::constBlock constABlock;
typedef BlockAb::Column BVector;
typedef BlockAb::constColumn constBVector;
protected:
void assertInvariants() const;
static JacobianFactor::shared_ptr Combine(const FactorGraph<JacobianFactor>& factors, const VariableSlots& variableSlots);
public:
/** Copy constructor */
JacobianFactor(const JacobianFactor& gf);
/** default constructor for I/O */
JacobianFactor();
/** Construct Null factor */
JacobianFactor(const Vector& b_in);
/** Construct unary factor */
JacobianFactor(Index i1, const Matrix& A1,
const Vector& b, const SharedDiagonal& model);
/** Construct binary factor */
JacobianFactor(Index i1, const Matrix& A1,
Index i2, const Matrix& A2,
const Vector& b, const SharedDiagonal& model);
/** Construct ternary factor */
JacobianFactor(Index i1, const Matrix& A1, Index i2,
const Matrix& A2, Index i3, const Matrix& A3,
const Vector& b, const SharedDiagonal& model);
/** Construct an n-ary factor */
JacobianFactor(const std::vector<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model);
JacobianFactor(const std::list<std::pair<Index, Matrix> > &terms,
const Vector &b, const SharedDiagonal& model);
/** Construct from Conditional Gaussian */
JacobianFactor(const GaussianConditional& cg);
/** Convert from a HessianFactor (does Cholesky) */
JacobianFactor(const HessianFactor& factor);
virtual ~JacobianFactor() {}
// Implementing Testable interface
virtual void print(const std::string& s = "") const;
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const;
Vector unweighted_error(const VectorValues& c) const; /** (A*x-b) */
Vector error_vector(const VectorValues& c) const; /** (A*x-b)/sigma */
virtual double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */
/** Check if the factor contains no information, i.e. zero rows. This does
* not necessarily mean that the factor involves no variables (to check for
* involving no variables use keys().empty()).
*/
bool empty() const { return Ab_.size1() == 0;}
/** Return the dimension of the variable pointed to by the given key iterator
* todo: Remove this in favor of keeping track of dimensions with variables?
*/
virtual size_t getDim(const_iterator variable) const { return Ab_(variable - keys_.begin()).size2(); }
/**
* Permutes the GaussianFactor, but for efficiency requires the permutation
* to already be inverted. This acts just as a change-of-name for each
* variable. The order of the variables within the factor is not changed.
*/
virtual void permuteWithInverse(const Permutation& inversePermutation);
/**
* return the number of rows in the corresponding linear system
*/
size_t size1() const { return Ab_.size1(); }
/**
* return the number of rows in the corresponding linear system
*/
size_t numberOfRows() const { return size1(); }
/**
* return the number of columns in the corresponding linear system
*/
size_t size2() const { return Ab_.size2(); }
/** get a copy of model */
const SharedDiagonal& get_model() const { return model_; }
/** get a copy of model (non-const version) */
SharedDiagonal& get_model() { return model_; }
/** Get a view of the r.h.s. vector b */
constBVector getb() const { return Ab_.column(size(), 0); }
/** Get a view of the A matrix for the variable pointed to be the given key iterator */
constABlock getA(const_iterator variable) const { return Ab_(variable - keys_.begin()); }
BVector getb() { return Ab_.column(size(), 0); }
ABlock getA(iterator variable) { return Ab_(variable - keys_.begin()); }
/** Return A*x */
Vector operator*(const VectorValues& x) const;
/** x += A'*e */
void transposeMultiplyAdd(double alpha, const Vector& e, VectorValues& x) const;
/**
* Return (dense) matrix associated with factor
* @param weight set to true to bake the noise model weights into A and b
*/
std::pair<Matrix, Vector> matrix(bool weight = true) const;
/**
* Return (dense) matrix associated with factor
* The returned system is an augmented matrix: [A b]
* @param weight set to true to whiten the system, baking the weights into [A b]
*/
Matrix matrix_augmented(bool weight = true) const;
/**
* Return vectors i, j, and s to generate an m-by-n sparse matrix
* such that S(i(k),j(k)) = s(k), which can be given to MATLAB's sparse.
* As above, the standard deviations are baked into A and b
* @param first column index for each variable
*/
boost::tuple<std::list<int>, std::list<int>, std::list<double> >
sparse(const std::map<Index, size_t>& columnIndices) const;
/**
* Return a whitened version of the factor, i.e. with unit diagonal noise
* model. */
JacobianFactor whiten() const;
/**
* Combine and eliminate several factors.
*/
static std::pair<GaussianBayesNet::shared_ptr, shared_ptr> CombineAndEliminate(
const FactorGraph<JacobianFactor>& factors, size_t nrFrontals=1);
GaussianConditional::shared_ptr eliminateFirst();
GaussianBayesNet::shared_ptr eliminate(size_t nrFrontals = 1);
// Friend HessianFactor to facilitate conversion constructors
friend class HessianFactor;
// Friend unit tests (see also forward declarations above)
friend class ::Combine2GaussianFactorTest;
friend class ::eliminateFrontalsGaussianFactorTest;
};
/** return A*x */
Errors operator*(const FactorGraph<JacobianFactor>& fg, const VectorValues& x);
/* In-place version e <- A*x that overwrites e. */
void multiplyInPlace(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, Errors& e);
/* In-place version e <- A*x that takes an iterator. */
void multiplyInPlace(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const Errors::iterator& e);
/** x += alpha*A'*e */
void transposeMultiplyAdd(const FactorGraph<JacobianFactor>& fg, double alpha, const Errors& e, VectorValues& x);
/**
* Calculate the gradient of A'*(A*x-b) for a given config
* @param x: VectorValues specifying where to calculate gradient
* @return gradient, as a VectorValues as well
*/
VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x);
// matrix-vector operations
void residual(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r);
void multiply(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r);
void transposeMultiply(const FactorGraph<JacobianFactor>& fg, const VectorValues &r, VectorValues &x);
}
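For reference, a minimal usage sketch of the free functions declared above (fg and x are assumed to be an already-constructed FactorGraph<JacobianFactor> and a structurally matching VectorValues; this is not code from the commit):

Errors e = fg * x;                       // e = A*x via operator* above
VectorValues g = VectorValues::zero(x);  // gradient accumulator with the same layout as x
transposeMultiplyAdd(fg, 1.0, e, g);     // g += 1.0 * A'*e, i.e. g = A'*A*x here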

View File

@ -25,11 +25,12 @@ check_PROGRAMS += tests/testVectorValues
sources += GaussianSequentialSolver.cpp GaussianMultifrontalSolver.cpp
# Gaussian Factor Graphs
sources += JacobianFactor.cpp HessianFactor.cpp
sources += GaussianFactor.cpp GaussianFactorGraph.cpp
sources += GaussianJunctionTree.cpp
sources += GaussianConditional.cpp GaussianBayesNet.cpp
sources += GaussianISAM.cpp
check_PROGRAMS += tests/testGaussianFactor tests/testGaussianConditional
check_PROGRAMS += tests/testHessianFactor tests/testGaussianFactor tests/testGaussianConditional
check_PROGRAMS += tests/testGaussianJunctionTree
# Iterative Methods

View File

@ -124,6 +124,8 @@ void Gaussian::WhitenInPlace(MatrixColMajor& H) const {
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional<vector<int>&> firstZeroRows) const {
static const bool debug = false;
// get size(A) and maxRank
// TODO: really no rank problems ?
size_t m = Ab.size1(), n = Ab.size2()-1;
@ -132,6 +134,8 @@ SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional<vector<int>&> firstZeroR
// pre-whiten everything (cheaply if possible)
WhitenInPlace(Ab);
if(debug) gtsam::print(Ab, "Whitened Ab: ");
// Perform in-place Householder
#ifdef GT_USE_LAPACK
if(firstZeroRows)
@ -222,6 +226,9 @@ SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional<vector<int>&> firstZeroR
// General QR, see also special version in Constrained
SharedDiagonal Gaussian::QRColumnWise(MatrixColMajor& Ab, vector<int>& firstZeroRows) const {
static const bool debug = false;
// get size(A) and maxRank
// TODO: really no rank problems ?
size_t m = Ab.size1(), n = Ab.size2()-1;
@ -230,6 +237,8 @@ SharedDiagonal Gaussian::QRColumnWise(MatrixColMajor& Ab, vector<int>& firstZero
// pre-whiten everything (cheaply if possible)
WhitenInPlace(Ab);
if(debug) gtsam::print(Ab, "Whitened Ab: ");
// Perform in-place Householder
#ifdef GT_USE_LAPACK
#ifdef USE_LAPACK_QR

View File

@ -17,6 +17,8 @@
#include <boost/foreach.hpp>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/inference/FactorGraph-inl.h>
using namespace std;
@ -25,7 +27,7 @@ namespace gtsam {
/* ************************************************************************* */
SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2,
const sharedBayesNet& Rc1, const sharedValues& xbar) :
Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(Ab2_->errors_(*xbar)) {
Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(gaussianErrors_(*Ab2_,*xbar)) {
}
/* ************************************************************************* */
@ -48,35 +50,35 @@ namespace gtsam {
// }
/* ************************************************************************* */
double SubgraphPreconditioner::error(const VectorValues& y) const {
double error(const SubgraphPreconditioner& sp, const VectorValues& y) {
Errors e(y);
VectorValues x = this->x(y);
Errors e2 = Ab2_->errors(x);
VectorValues x = sp.x(y);
Errors e2 = gaussianErrors(*sp.Ab2(),x);
return 0.5 * (dot(e, e) + dot(e2,e2));
}
/* ************************************************************************* */
// gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const {
VectorValues x = this->x(y); // x = inv(R1)*y
Errors e2 = Ab2_->errors(x);
VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) {
VectorValues x = sp.x(y); // x = inv(R1)*y
Errors e2 = gaussianErrors(*sp.Ab2(),x);
VectorValues gx2 = VectorValues::zero(y);
Ab2_->transposeMultiplyAdd(1.0,e2,gx2); // A2'*e2;
VectorValues gy2 = gtsam::backSubstituteTranspose(*Rc1_, gx2); // inv(R1')*gx2
gtsam::transposeMultiplyAdd(*sp.Ab2(),1.0,e2,gx2); // A2'*e2;
VectorValues gy2 = gtsam::backSubstituteTranspose(*sp.Rc1(), gx2); // inv(R1')*gx2
return y + gy2;
}
/* ************************************************************************* */
// Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) {
Errors e(y);
// Add A2 contribution
VectorValues x = y; // TODO avoid ?
gtsam::backSubstituteInPlace(*Rc1_, x); // x=inv(R1)*y
Errors e2 = *Ab2_ * x; // A2*x
gtsam::backSubstituteInPlace(*sp.Rc1(), x); // x=inv(R1)*y
Errors e2 = *sp.Ab2() * x; // A2*x
e.splice(e.end(), e2);
return e;
@ -84,7 +86,7 @@ namespace gtsam {
/* ************************************************************************* */
// In-place version that overwrites e
void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const {
void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) {
Errors::iterator ei = e.begin();
@ -94,27 +96,26 @@ namespace gtsam {
// Add A2 contribution
VectorValues x = y; // TODO avoid ?
gtsam::backSubstituteInPlace(*Rc1_, x); // x=inv(R1)*y
Ab2_->multiplyInPlace(x,ei); // use iterator version
gtsam::backSubstituteInPlace(*sp.Rc1(), x); // x=inv(R1)*y
gtsam::multiplyInPlace(*sp.Ab2(),x,ei); // use iterator version
}
/* ************************************************************************* */
// Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2
VectorValues SubgraphPreconditioner::operator^(const Errors& e) const {
VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e) {
Errors::const_iterator it = e.begin();
VectorValues y = zero();
VectorValues y = sp.zero();
for ( Index i = 0 ; i < y.size() ; ++i, ++it )
y[i] = *it ;
transposeMultiplyAdd2(1.0,it,e.end(),y);
sp.transposeMultiplyAdd2(1.0,it,e.end(),y);
return y;
}
/* ************************************************************************* */
// y += alpha*A'*e
void SubgraphPreconditioner::transposeMultiplyAdd
(double alpha, const Errors& e, VectorValues& y) const {
void transposeMultiplyAdd
(const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) {
Errors::const_iterator it = e.begin();
@ -122,7 +123,7 @@ namespace gtsam {
const Vector& ei = *it;
axpy(alpha,ei,y[i]) ;
}
transposeMultiplyAdd2(alpha,it,e.end(),y);
sp.transposeMultiplyAdd2(alpha,it,e.end(),y);
}
/* ************************************************************************* */
@ -137,7 +138,7 @@ namespace gtsam {
e2.push_back(*(it++));
VectorValues x = VectorValues::zero(y); // x = 0
Ab2_->transposeMultiplyAdd(1.0,e2,x); // x += A2'*e2
gtsam::transposeMultiplyAdd(*Ab2_,1.0,e2,x); // x += A2'*e2
axpy(alpha, gtsam::backSubstituteTranspose(*Rc1_, x), y); // y += alpha*inv(R1')*x
}

View File

@ -17,7 +17,7 @@
#pragma once
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/nonlinear/Ordering.h>
@ -34,7 +34,7 @@ namespace gtsam {
public:
typedef boost::shared_ptr<const GaussianBayesNet> sharedBayesNet;
typedef boost::shared_ptr<const GaussianFactorGraph> sharedFG;
typedef boost::shared_ptr<const FactorGraph<JacobianFactor> > sharedFG;
typedef boost::shared_ptr<const VectorValues> sharedValues;
typedef boost::shared_ptr<const Errors> sharedErrors;
@ -56,6 +56,14 @@ namespace gtsam {
*/
SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar);
/** Access Ab1 */
const sharedFG& Ab1() const { return Ab1_; }
/** Access Ab2 */
const sharedFG& Ab2() const { return Ab2_; }
/** Access Rc1 */
const sharedBayesNet& Rc1() const { return Rc1_; }
/**
* Add zero-mean i.i.d. Gaussian prior terms to each variable
@ -73,27 +81,6 @@ namespace gtsam {
return V ;
}
/* error, given y */
double error(const VectorValues& y) const;
/** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
VectorValues gradient(const VectorValues& y) const;
/** Apply operator A */
Errors operator*(const VectorValues& y) const;
/** Apply operator A in place: needs e allocated already */
void multiplyInPlace(const VectorValues& y, Errors& e) const;
/** Apply operator A' */
VectorValues operator^(const Errors& e) const;
/**
* Add A'*e to y
* y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2]
*/
void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& y) const;
/**
* Add constraint part of the error only, used in both calls above
* y += alpha*inv(R1')*A2'*e2
@ -106,4 +93,25 @@ namespace gtsam {
void print(const std::string& s = "SubgraphPreconditioner") const;
};
/* error, given y */
double error(const SubgraphPreconditioner& sp, const VectorValues& y);
/** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */
VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y);
/** Apply operator A */
Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y);
/** Apply operator A in place: needs e allocated already */
void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e);
/** Apply operator A' */
VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e);
/**
* Add A'*e to y
* y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2]
*/
void transposeMultiplyAdd(const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y);
} // namespace gtsam
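A minimal caller sketch for the free-function interface above (sp, y, and alpha are assumed to be a constructed SubgraphPreconditioner, a current iterate, and a step size; the VectorValues axpy overload used elsewhere in this commit is assumed):

double f = error(sp, y);           // 0.5*(|e1|^2 + |e2|^2) at the current iterate
VectorValues g = gradient(sp, y);  // y + inv(R1')*A2'*(A2*inv(R1)*y - b2bar)
axpy(-alpha, g, y);                // one steepest-descent step, y -= alpha*g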

View File

@ -20,11 +20,11 @@
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/nonlinear/Key.h>
#include <gtsam/linear/iterative-inl.h>
#include <gtsam/inference/EliminationTree-inl.h>
using namespace std;
namespace gtsam {
@ -62,12 +62,11 @@ bool split(const std::map<Index, Index> &M,
}
template<class GRAPH, class LINEAR, class VALUES>
void SubgraphSolver<GRAPH,LINEAR,VALUES>::replaceFactors(const typename LINEAR::shared_ptr &graph) {
shared_linear Ab1 = boost::make_shared<LINEAR>(),
Ab2 = boost::make_shared<LINEAR>();
GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared<GaussianFactorGraph>();
GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared<GaussianFactorGraph>();
if (parameters_->verbosity()) cout << "split the graph ...";
split(pairs_, *graph, *Ab1, *Ab2) ;
@ -84,7 +83,8 @@ void SubgraphSolver<GRAPH,LINEAR,VALUES>::replaceFactors(const typename LINEAR::
SubgraphPreconditioner::sharedBayesNet Rc1 = EliminationTree<GaussianFactor>::Create(sacrificialAb1)->eliminate();
SubgraphPreconditioner::sharedValues xbar = gtsam::optimize_(*Rc1);
pc_ = boost::make_shared<SubgraphPreconditioner>(Ab1,Ab2,Rc1,xbar);
pc_ = boost::make_shared<SubgraphPreconditioner>(
Ab1->dynamicCastFactors<FactorGraph<JacobianFactor> >(), Ab2->dynamicCastFactors<FactorGraph<JacobianFactor> >(),Rc1,xbar);
}
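The dynamicCastFactors<FactorGraph<JacobianFactor> >() calls above view a graph of virtual GaussianFactors as Jacobian-only factors. A sketch of what such a helper has to do (hypothetical name and error handling, not the actual GTSAM implementation):

FactorGraph<JacobianFactor>::shared_ptr castToJacobians(const GaussianFactorGraph& graph) {
  FactorGraph<JacobianFactor>::shared_ptr result(new FactorGraph<JacobianFactor>());
  for (size_t i = 0; i < graph.size(); ++i) {
    // down-cast each shared factor pointer; a non-Jacobian factor is an error here
    JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(graph[i]);
    if (!jf) throw std::invalid_argument("castToJacobians: factor is not a JacobianFactor");
    result->push_back(jf);
  }
  return result;
}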
template<class GRAPH, class LINEAR, class VALUES>

View File

@ -48,7 +48,7 @@ namespace gtsam {
// Start with g0 = A'*(A*x0-b), d0 = - g0
// i.e., first step is in direction of negative gradient
g = Ab.gradient(x);
g = gradient(Ab,x);
d = g; // instead of negating gradient, alpha will be negated
// init gamma and calculate threshold
@ -88,9 +88,9 @@ namespace gtsam {
double alpha = takeOptimalStep(x);
// update gradient (or re-calculate at reset time)
if (k % parameters_.reset() == 0) g = Ab.gradient(x);
if (k % parameters_.reset() == 0) g = gradient(Ab,x);
// axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
else Ab.transposeMultiplyAdd(alpha, Ad, g);
else transposeMultiplyAdd(Ab, alpha, Ad, g);
// check for convergence
double new_gamma = dot(g, g);
@ -111,7 +111,7 @@ namespace gtsam {
gamma = new_gamma;
// In-place recalculation Ad <- A*d to avoid re-allocating Ad
Ab.multiplyInPlace(d, Ad);
multiplyInPlace(Ab, d, Ad);
return false;
}
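Note the pattern in this hunk: the conjugate-gradient template no longer calls member functions on Ab, but free functions, so any system type works as long as it provides them. A compile-time sketch of the implicit requirements (illustrative only; V is the system's vector type and E its errors type):

template<class S, class V, class E>
void cgRequirements(const S& Ab, const V& x, V& g, const E& e, const V& d, E& Ad) {
  g = gradient(Ab, x);                  // g = A'*(A*x-b)
  transposeMultiplyAdd(Ab, 1.0, e, g);  // g += A'*e
  multiplyInPlace(Ab, d, Ad);           // Ad <- A*d, reusing storage
}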

View File

@ -54,37 +54,43 @@ namespace gtsam {
A_(A), b_(b) {
}
/** gradient of objective function 0.5*|Ax-b_|^2 at x = A_'*(Ax-b_) */
Vector gradient(const Vector& x) const {
return A_ ^ (A_ * x - b_);
}
/** Access A matrix */
const Matrix& A() const { return A_; }
/** Apply operator A */
inline Vector operator*(const Vector& x) const {
return A_ * x;
}
/** Apply operator A in place */
inline void multiplyInPlace(const Vector& x, Vector& e) const {
e = A_ * x;
}
/** Access b vector */
const Vector& b() const { return b_; }
/** Apply operator A'*e */
inline Vector operator^(const Vector& e) const {
Vector operator^(const Vector& e) const {
return A_ ^ e;
}
/** x += alpha* A'*e */
inline void transposeMultiplyAdd(double alpha, const Vector& e, Vector& x) const {
gtsam::transposeMultiplyAdd(alpha,A_,e,x);
}
/**
* Print with optional string
*/
void print (const std::string& s = "System") const;
};
/** gradient of objective function 0.5*|Ax-b_|^2 at x = A_'*(Ax-b_) */
inline Vector gradient(const System& system, const Vector& x) {
return system.A() ^ (system.A() * x - system.b());
}
/** Apply operator A */
inline Vector operator*(const System& system, const Vector& x) {
return system.A() * x;
}
/** Apply operator A in place */
inline void multiplyInPlace(const System& system, const Vector& x, Vector& e) {
e = system.A() * x;
}
/** x += alpha* A'*e */
inline void transposeMultiplyAdd(const System& system, double alpha, const Vector& e, Vector& x) {
transposeMultiplyAdd(alpha,system.A(),e,x);
}
/**
* Method of steepest gradients, System version
*/

View File

@ -35,6 +35,9 @@ using namespace boost::assign;
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/SharedDiagonal.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/inference/VariableSlots.h>
using namespace std;
using namespace gtsam;
@ -48,30 +51,30 @@ static SharedDiagonal
constraintModel = noiseModel::Constrained::All(2);
/* ************************************************************************* */
TEST( GaussianFactor, constructor)
TEST(GaussianFactor, constructor)
{
Vector b = Vector_(3, 1., 2., 3.);
SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector_(3,1.,1.,1.));
std::list<std::pair<Index, Matrix> > terms;
terms.push_back(make_pair(_x0_, eye(3)));
terms.push_back(make_pair(_x1_, 2.*eye(3)));
GaussianFactor actual(terms, b, noise);
GaussianFactor expected(_x0_, eye(3), _x1_, 2.*eye(3), b, noise);
JacobianFactor actual(terms, b, noise);
JacobianFactor expected(_x0_, eye(3), _x1_, 2.*eye(3), b, noise);
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST( GaussianFactor, constructor2)
TEST(GaussianFactor, constructor2)
{
Vector b = Vector_(3, 1., 2., 3.);
SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector_(3,1.,1.,1.));
std::list<std::pair<Index, Matrix> > terms;
terms.push_back(make_pair(_x0_, eye(3)));
terms.push_back(make_pair(_x1_, 2.*eye(3)));
const GaussianFactor actual(terms, b, noise);
const JacobianFactor actual(terms, b, noise);
GaussianFactor::const_iterator key0 = actual.begin();
GaussianFactor::const_iterator key1 = key0 + 1;
JacobianFactor::const_iterator key0 = actual.begin();
JacobianFactor::const_iterator key1 = key0 + 1;
EXPECT(assert_equal(*key0, _x0_));
EXPECT(assert_equal(*key1, _x1_));
@ -125,7 +128,7 @@ TEST( GaussianFactor, constructor2)
// variablePositions[1].resize(2); variablePositions[1][0]=0; variablePositions[1][1]=1;
// variablePositions[2].resize(1); variablePositions[2][0]=0;
//
// GaussianFactor actual = *GaussianFactor::Combine(gfg, varindex, factors, variables, variablePositions);
// JacobianFactor actual = *JacobianFactor::Combine(gfg, varindex, factors, variables, variablePositions);
//
// Matrix zero3x3 = zeros(3,3);
// Matrix A0 = gtsam::stack(3, &A00, &A10, &A20);
@ -133,7 +136,7 @@ TEST( GaussianFactor, constructor2)
// Vector b = gtsam::concatVectors(3, &b0, &b1, &b2);
// Vector sigmas = gtsam::concatVectors(3, &s0, &s1, &s2);
//
// GaussianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
// JacobianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
//
// EXPECT(assert_equal(expected, actual));
//}
@ -171,7 +174,7 @@ TEST(GaussianFactor, Combine2)
gfg.add(0, A10, 1, A11, b1, noiseModel::Diagonal::Sigmas(s1, true));
gfg.add(1, A21, b2, noiseModel::Diagonal::Sigmas(s2, true));
GaussianFactor actual = *GaussianFactor::Combine(gfg, VariableSlots(gfg));
JacobianFactor actual = *JacobianFactor::Combine(gfg, VariableSlots(gfg));
Matrix zero3x3 = zeros(3,3);
Matrix A0 = gtsam::stack(3, &A10, &zero3x3, &zero3x3);
@ -179,13 +182,13 @@ TEST(GaussianFactor, Combine2)
Vector b = gtsam::concatVectors(3, &b1, &b0, &b2);
Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2);
GaussianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
JacobianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST(GaussianFactor, CombineAndEliminate)
TEST_UNSAFE(GaussianFactor, CombineAndEliminate)
{
Matrix A01 = Matrix_(3,3,
1.0, 0.0, 0.0,
@ -223,22 +226,22 @@ TEST(GaussianFactor, CombineAndEliminate)
Vector b = gtsam::concatVectors(3, &b1, &b0, &b2);
Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2);
GaussianFactor expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
GaussianBayesNet expectedBN(*expectedFactor.eliminate(1, GaussianFactor::SOLVE_QR));
JacobianFactor expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
GaussianBayesNet expectedBN(*expectedFactor.eliminate(1));
pair<GaussianBayesNet::shared_ptr, GaussianFactor::shared_ptr> actual(
GaussianFactor::CombineAndEliminate(gfg, 1, GaussianFactor::SOLVE_CHOLESKY));
pair<GaussianBayesNet::shared_ptr, JacobianFactor::shared_ptr> actualQR(JacobianFactor::CombineAndEliminate(
*gfg.dynamicCastFactors<FactorGraph<JacobianFactor> >(), 1));
EXPECT(assert_equal(expectedBN, *actual.first));
EXPECT(assert_equal(expectedFactor, *actual.second));
EXPECT(assert_equal(expectedBN, *actualQR.first));
EXPECT(assert_equal(expectedFactor, *actualQR.second));
}
///* ************************************************************************* */
//TEST( GaussianFactor, operators )
//TEST(GaussianFactor, operators )
//{
// Matrix I = eye(2);
// Vector b = Vector_(2,0.2,-0.1);
// GaussianFactor lf(_x1_, -I, _x2_, I, b, sigma0_1);
// JacobianFactor lf(_x1_, -I, _x2_, I, b, sigma0_1);
//
// VectorValues c;
// c.insert(_x1_,Vector_(2,10.,20.));
@ -271,33 +274,33 @@ TEST(GaussianFactor, CombineAndEliminate)
// A11(1,0) = 0; A11(1,1) = 1;
// Vector b(2);
// b(0) = 2; b(1) = -1;
// GaussianFactor::shared_ptr f1(new GaussianFactor(_x1_, A11, b*sigma1, sharedSigma(2,sigma1)));
// JacobianFactor::shared_ptr f1(new JacobianFactor(_x1_, A11, b*sigma1, sharedSigma(2,sigma1)));
//
// double sigma2 = 0.5;
// A11(0,0) = 1; A11(0,1) = 0;
// A11(1,0) = 0; A11(1,1) = -1;
// b(0) = 4 ; b(1) = -5;
// GaussianFactor::shared_ptr f2(new GaussianFactor(_x1_, A11, b*sigma2, sharedSigma(2,sigma2)));
// JacobianFactor::shared_ptr f2(new JacobianFactor(_x1_, A11, b*sigma2, sharedSigma(2,sigma2)));
//
// double sigma3 = 0.25;
// A11(0,0) = 1; A11(0,1) = 0;
// A11(1,0) = 0; A11(1,1) = -1;
// b(0) = 3 ; b(1) = -88;
// GaussianFactor::shared_ptr f3(new GaussianFactor(_x1_, A11, b*sigma3, sharedSigma(2,sigma3)));
// JacobianFactor::shared_ptr f3(new JacobianFactor(_x1_, A11, b*sigma3, sharedSigma(2,sigma3)));
//
// // TODO: find a real sigma value for this example
// double sigma4 = 0.1;
// A11(0,0) = 6; A11(0,1) = 0;
// A11(1,0) = 0; A11(1,1) = 7;
// b(0) = 5 ; b(1) = -6;
// GaussianFactor::shared_ptr f4(new GaussianFactor(_x1_, A11*sigma4, b*sigma4, sharedSigma(2,sigma4)));
// JacobianFactor::shared_ptr f4(new JacobianFactor(_x1_, A11*sigma4, b*sigma4, sharedSigma(2,sigma4)));
//
// vector<GaussianFactor::shared_ptr> lfg;
// vector<JacobianFactor::shared_ptr> lfg;
// lfg.push_back(f1);
// lfg.push_back(f2);
// lfg.push_back(f3);
// lfg.push_back(f4);
// GaussianFactor combined(lfg);
// JacobianFactor combined(lfg);
//
// Vector sigmas = Vector_(8, sigma1, sigma1, sigma2, sigma2, sigma3, sigma3, sigma4, sigma4);
// Matrix A22(8,2);
@ -315,25 +318,25 @@ TEST(GaussianFactor, CombineAndEliminate)
//
// vector<pair<Index, Matrix> > meas;
// meas.push_back(make_pair(_x1_, A22));
// GaussianFactor expected(meas, exb, sigmas);
// JacobianFactor expected(meas, exb, sigmas);
// EXPECT(assert_equal(expected,combined));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactor, linearFactorN){
//TEST(GaussianFactor, linearFactorN){
// Matrix I = eye(2);
// vector<GaussianFactor::shared_ptr> f;
// vector<JacobianFactor::shared_ptr> f;
// SharedDiagonal model = sharedSigma(2,1.0);
// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x1_, I, Vector_(2,
// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x1_, I, Vector_(2,
// 10.0, 5.0), model)));
// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x1_, -10 * I,
// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x1_, -10 * I,
// _x2_, 10 * I, Vector_(2, 1.0, -2.0), model)));
// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x2_, -10 * I,
// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x2_, -10 * I,
// _x3_, 10 * I, Vector_(2, 1.5, -1.5), model)));
// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x3_, -10 * I,
// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x3_, -10 * I,
// _x4_, 10 * I, Vector_(2, 2.0, -1.0), model)));
//
// GaussianFactor combinedFactor(f);
// JacobianFactor combinedFactor(f);
//
// vector<pair<Index, Matrix> > combinedMeasurement;
// combinedMeasurement.push_back(make_pair(_x1_, Matrix_(8,2,
@ -376,12 +379,12 @@ TEST(GaussianFactor, CombineAndEliminate)
// 10.0, 5.0, 1.0, -2.0, 1.5, -1.5, 2.0, -1.0);
//
// Vector sigmas = repeat(8,1.0);
// GaussianFactor expected(combinedMeasurement, b, sigmas);
// JacobianFactor expected(combinedMeasurement, b, sigmas);
// EXPECT(assert_equal(expected,combinedFactor));
//}
/* ************************************************************************* */
TEST( GaussianFactor, eliminate2 )
TEST(GaussianFactor, eliminate2 )
{
// sigmas
double sigma1 = 0.2;
@ -415,14 +418,12 @@ TEST( GaussianFactor, eliminate2 )
vector<pair<Index, Matrix> > meas;
meas.push_back(make_pair(_x2_, Ax2));
meas.push_back(make_pair(_l11_, Al1x1));
GaussianFactor combined(meas, b2, sigmas);
JacobianFactor combined(meas, b2, sigmas);
// eliminate the combined factor
GaussianConditional::shared_ptr actualCG_QR, actualCG_Chol;
GaussianFactor::shared_ptr actualLF_QR(new GaussianFactor(combined));
GaussianFactor::shared_ptr actualLF_Chol(new GaussianFactor(combined));
actualCG_QR = actualLF_QR->eliminateFirst(GaussianFactor::SOLVE_QR);
actualCG_Chol = actualLF_Chol->eliminateFirst(GaussianFactor::SOLVE_CHOLESKY);
GaussianConditional::shared_ptr actualCG_QR;
JacobianFactor::shared_ptr actualLF_QR(new JacobianFactor(combined));
actualCG_QR = actualLF_QR->eliminateFirst();
// create expected Conditional Gaussian
double oldSigma = 0.0894427; // from when R was made unit
@ -437,7 +438,6 @@ TEST( GaussianFactor, eliminate2 )
Vector d = Vector_(2,0.2,-0.14)/oldSigma;
GaussianConditional expectedCG(_x2_,d,R11,_l11_,S12,ones(2));
EXPECT(assert_equal(expectedCG,*actualCG_QR,1e-4));
EXPECT(assert_equal(expectedCG,*actualCG_Chol,1e-4));
// the expected linear factor
double sigma = 0.2236;
@ -447,9 +447,8 @@ TEST( GaussianFactor, eliminate2 )
0.00, 1.00, +0.00, -1.00
)/sigma;
Vector b1 = Vector_(2,0.0,0.894427);
GaussianFactor expectedLF(_l11_, Bl1x1, b1, repeat(2,1.0));
JacobianFactor expectedLF(_l11_, Bl1x1, b1, repeat(2,1.0));
EXPECT(assert_equal(expectedLF,*actualLF_QR,1e-3));
EXPECT(assert_equal(expectedLF,*actualLF_Chol,1e-3));
}
/* ************************************************************************* */
@ -481,7 +480,7 @@ TEST(GaussianFactor, eliminateFrontals)
make_pair(9, ublas::project(Ab, ublas::range(0,4), ublas::range(6,8))),
make_pair(11, ublas::project(Ab, ublas::range(0,4), ublas::range(8,10)));
Vector b1 = ublas::project(ublas::column(Ab, 10), ublas::range(0,4));
GaussianFactor::shared_ptr factor1(new GaussianFactor(terms1, b1, sharedSigma(4, 0.5)));
JacobianFactor::shared_ptr factor1(new JacobianFactor(terms1, b1, sharedSigma(4, 0.5)));
// Create second factor
list<pair<Index, Matrix> > terms2;
@ -491,7 +490,7 @@ TEST(GaussianFactor, eliminateFrontals)
make_pair(9, ublas::project(Ab, ublas::range(4,8), ublas::range(6,8))),
make_pair(11, ublas::project(Ab, ublas::range(4,8), ublas::range(8,10)));
Vector b2 = ublas::project(ublas::column(Ab, 10), ublas::range(4,8));
GaussianFactor::shared_ptr factor2(new GaussianFactor(terms2, b2, sharedSigma(4, 0.5)));
JacobianFactor::shared_ptr factor2(new JacobianFactor(terms2, b2, sharedSigma(4, 0.5)));
// Create third factor
list<pair<Index, Matrix> > terms3;
@ -500,14 +499,14 @@ TEST(GaussianFactor, eliminateFrontals)
make_pair(9, ublas::project(Ab, ublas::range(8,12), ublas::range(6,8))),
make_pair(11, ublas::project(Ab, ublas::range(8,12), ublas::range(8,10)));
Vector b3 = ublas::project(ublas::column(Ab, 10), ublas::range(8,12));
GaussianFactor::shared_ptr factor3(new GaussianFactor(terms3, b3, sharedSigma(4, 0.5)));
JacobianFactor::shared_ptr factor3(new JacobianFactor(terms3, b3, sharedSigma(4, 0.5)));
// Create fourth factor
list<pair<Index, Matrix> > terms4;
terms4 +=
make_pair(11, ublas::project(Ab, ublas::range(12,14), ublas::range(8,10)));
Vector b4 = ublas::project(ublas::column(Ab, 10), ublas::range(12,14));
GaussianFactor::shared_ptr factor4(new GaussianFactor(terms4, b4, sharedSigma(2, 0.5)));
JacobianFactor::shared_ptr factor4(new JacobianFactor(terms4, b4, sharedSigma(2, 0.5)));
// Create factor graph
GaussianFactorGraph factors;
@ -517,11 +516,11 @@ TEST(GaussianFactor, eliminateFrontals)
factors.push_back(factor4);
// Create combined factor
GaussianFactor combined(*GaussianFactor::Combine(factors, VariableSlots(factors)));
JacobianFactor combined(*JacobianFactor::Combine(factors, VariableSlots(factors)));
// Copies factors as they will be eliminated in place
GaussianFactor actualFactor_QR = combined;
GaussianFactor actualFactor_Chol = combined;
JacobianFactor actualFactor_QR = combined;
JacobianFactor actualFactor_Chol = combined;
// Expected augmented matrix, both GaussianConditional (first 6 rows) and remaining factor (next 4 rows)
Matrix R = 2.0*Matrix_(11,11,
@ -579,7 +578,7 @@ TEST(GaussianFactor, eliminateFrontals)
Vector be = ublas::project(ublas::column(R, 10), ublas::range(6,10));
// Eliminate (3 frontal variables, 6 scalar columns) using QR !!!!
GaussianBayesNet actualFragment_QR = *actualFactor_QR.eliminate(3, GaussianFactor::SOLVE_QR);
GaussianBayesNet actualFragment_QR = *actualFactor_QR.eliminate(3);
EXPECT(assert_equal(expectedFragment, actualFragment_QR, 0.001));
EXPECT(assert_equal(size_t(2), actualFactor_QR.keys().size()));
EXPECT(assert_equal(Index(9), actualFactor_QR.keys()[0]));
@ -587,10 +586,10 @@ TEST(GaussianFactor, eliminateFrontals)
EXPECT(assert_equal(Ae1, actualFactor_QR.getA(actualFactor_QR.begin()), 0.001));
EXPECT(assert_equal(Ae2, actualFactor_QR.getA(actualFactor_QR.begin()+1), 0.001));
EXPECT(assert_equal(be, actualFactor_QR.getb(), 0.001));
EXPECT(assert_equal(ones(4), actualFactor_QR.get_sigmas(), 0.001));
EXPECT(assert_equal(ones(4), actualFactor_QR.get_model()->sigmas(), 0.001));
// Eliminate (3 frontal variables, 6 scalar columns) using Cholesky !!!!
// GaussianBayesNet actualFragment_Chol = *actualFactor_Chol.eliminate(3, GaussianFactor::SOLVE_CHOLESKY);
// GaussianBayesNet actualFragment_Chol = *actualFactor_Chol.eliminate(3, JacobianFactor::SOLVE_CHOLESKY);
// EXPECT(assert_equal(expectedFragment, actualFragment_Chol, 0.001));
// EXPECT(assert_equal(size_t(2), actualFactor_Chol.keys().size()));
// EXPECT(assert_equal(Index(9), actualFactor_Chol.keys()[0]));
@ -602,9 +601,9 @@ TEST(GaussianFactor, eliminateFrontals)
}
/* ************************************************************************* */
TEST( GaussianFactor, default_error )
TEST(GaussianFactor, default_error )
{
GaussianFactor f;
JacobianFactor f;
vector<size_t> dims;
VectorValues c(dims);
double actual = f.error(c);
@ -612,21 +611,21 @@ TEST( GaussianFactor, default_error )
}
////* ************************************************************************* */
//TEST( GaussianFactor, eliminate_empty )
//TEST(GaussianFactor, eliminate_empty )
//{
// // create an empty factor
// GaussianFactor f;
// JacobianFactor f;
//
// // eliminate the empty factor
// GaussianConditional::shared_ptr actualCG;
// GaussianFactor::shared_ptr actualLF(new GaussianFactor(f));
// JacobianFactor::shared_ptr actualLF(new JacobianFactor(f));
// actualCG = actualLF->eliminateFirst();
//
// // expected Conditional Gaussian is just a parent-less node with P(x)=1
// GaussianConditional expectedCG(_x2_);
//
// // expected remaining factor is still empty :-)
// GaussianFactor expectedLF;
// JacobianFactor expectedLF;
//
// // check if the result matches
// EXPECT(actualCG->equals(expectedCG));
@ -634,10 +633,10 @@ TEST( GaussianFactor, default_error )
//}
//* ************************************************************************* */
TEST( GaussianFactor, empty )
TEST(GaussianFactor, empty )
{
// create an empty factor
GaussianFactor f;
JacobianFactor f;
EXPECT(f.empty()==true);
}
@ -650,9 +649,9 @@ void print(const list<T>& i) {
}
///* ************************************************************************* */
//TEST( GaussianFactor, tally_separator )
//TEST(GaussianFactor, tally_separator )
//{
// GaussianFactor f(_x1_, eye(2), _x2_, eye(2), _l1_, eye(2), ones(2), sigma0_1);
// JacobianFactor f(_x1_, eye(2), _x2_, eye(2), _l1_, eye(2), ones(2), sigma0_1);
//
// std::set<Index> act1, act2, act3;
// f.tally_separator(_x1_, act1);
@ -673,7 +672,7 @@ void print(const list<T>& i) {
//}
/* ************************************************************************* */
TEST( GaussianFactor, CONSTRUCTOR_GaussianConditional )
TEST(GaussianFactor, CONSTRUCTOR_GaussianConditional )
{
Matrix R11 = eye(2);
Matrix S12 = Matrix_(2,2,
@ -685,39 +684,39 @@ TEST( GaussianFactor, CONSTRUCTOR_GaussianConditional )
GaussianConditional::shared_ptr CG(new GaussianConditional(_x2_,d,R11,_l11_,S12,sigmas));
// Call the constructor we are testing !
GaussianFactor actualLF(*CG);
JacobianFactor actualLF(*CG);
GaussianFactor expectedLF(_x2_,R11,_l11_,S12,d, sigmas);
JacobianFactor expectedLF(_x2_,R11,_l11_,S12,d, sigmas);
EXPECT(assert_equal(expectedLF,actualLF,1e-5));
}
///* ************************************************************************* */
//TEST( GaussianFactor, CONSTRUCTOR_GaussianConditionalConstrained )
//TEST(GaussianFactor, CONSTRUCTOR_GaussianConditionalConstrained )
//{
// Matrix Ax = eye(2);
// Vector b = Vector_(2, 3.0, 5.0);
// SharedDiagonal noisemodel = noiseModel::Constrained::All(2);
// GaussianFactor::shared_ptr expected(new GaussianFactor(_x0_, Ax, b, noisemodel));
// JacobianFactor::shared_ptr expected(new JacobianFactor(_x0_, Ax, b, noisemodel));
// GaussianFactorGraph graph;
// graph.push_back(expected);
//
// GaussianConditional::shared_ptr conditional = GaussianSequentialSolver::EliminateUntil(graph,_x0_+1);
// GaussianFactor actual(*conditional);
// JacobianFactor actual(*conditional);
//
// EXPECT(assert_equal(*expected, actual));
//}
/* ************************************************************************* */
TEST ( GaussianFactor, constraint_eliminate1 )
TEST ( JacobianFactor, constraint_eliminate1 )
{
// construct a linear constraint
Vector v(2); v(0)=1.2; v(1)=3.4;
Index key = _x0_;
GaussianFactor lc(key, eye(2), v, constraintModel);
JacobianFactor lc(key, eye(2), v, constraintModel);
// eliminate it
GaussianConditional::shared_ptr actualCG;
GaussianFactor::shared_ptr actualLF(new GaussianFactor(lc));
JacobianFactor::shared_ptr actualLF(new JacobianFactor(lc));
actualCG = actualLF->eliminateFirst();
// verify linear factor
@ -730,7 +729,7 @@ TEST ( GaussianFactor, constraint_eliminate1 )
}
/* ************************************************************************* */
TEST ( GaussianFactor, constraint_eliminate2 )
TEST ( JacobianFactor, constraint_eliminate2 )
{
// Construct a linear constraint
// RHS
@ -746,15 +745,15 @@ TEST ( GaussianFactor, constraint_eliminate2 )
A2(0,0) = 1.0 ; A2(0,1) = 2.0;
A2(1,0) = 2.0 ; A2(1,1) = 4.0;
GaussianFactor lc(_x_, A1, _y_, A2, b, constraintModel);
JacobianFactor lc(_x_, A1, _y_, A2, b, constraintModel);
// eliminate x and verify results
GaussianConditional::shared_ptr actualCG;
GaussianFactor::shared_ptr actualLF(new GaussianFactor(lc));
JacobianFactor::shared_ptr actualLF(new JacobianFactor(lc));
actualCG = actualLF->eliminateFirst();
// LF should be null
GaussianFactor expectedLF;
JacobianFactor expectedLF;
EXPECT(assert_equal(*actualLF, expectedLF));
// verify CG
@ -791,14 +790,14 @@ TEST(GaussianFactor, permuteWithInverse)
inversePermutation[4] = 1;
inversePermutation[5] = 0;
GaussianFactor actual(1, A1, 3, A2, 5, A3, b, sharedSigma(2, 1.0));
GaussianFactorGraph actualFG; actualFG.push_back(GaussianFactor::shared_ptr(new GaussianFactor(actual)));
JacobianFactor actual(1, A1, 3, A2, 5, A3, b, sharedSigma(2, 1.0));
GaussianFactorGraph actualFG; actualFG.push_back(JacobianFactor::shared_ptr(new JacobianFactor(actual)));
VariableIndex actualIndex(actualFG);
actual.permuteWithInverse(inversePermutation);
// actualIndex.permute(*inversePermutation.inverse());
GaussianFactor expected(0, A3, 2, A2, 4, A1, b, sharedSigma(2, 1.0));
GaussianFactorGraph expectedFG; expectedFG.push_back(GaussianFactor::shared_ptr(new GaussianFactor(expected)));
JacobianFactor expected(0, A3, 2, A2, 4, A1, b, sharedSigma(2, 1.0));
GaussianFactorGraph expectedFG; expectedFG.push_back(JacobianFactor::shared_ptr(new JacobianFactor(expected)));
// GaussianVariableIndex expectedIndex(expectedFG);
EXPECT(assert_equal(expected, actual));
@ -816,7 +815,7 @@ TEST(GaussianFactor, permuteWithInverse)
}
///* ************************************************************************* */
//TEST( GaussianFactor, erase)
//TEST(GaussianFactor, erase)
//{
// Vector b = Vector_(3, 1., 2., 3.);
// SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector_(3,1.,1.,1.));
@ -824,16 +823,16 @@ TEST(GaussianFactor, permuteWithInverse)
// terms.push_back(make_pair(_x0_, eye(2)));
// terms.push_back(make_pair(_x1_, 2.*eye(2)));
//
// GaussianFactor actual(terms, b, noise);
// JacobianFactor actual(terms, b, noise);
// int erased = actual.erase_A(_x0_);
//
// LONGS_EQUAL(1, erased);
// GaussianFactor expected(_x1_, 2.*eye(2), b, noise);
// JacobianFactor expected(_x1_, 2.*eye(2), b, noise);
// EXPECT(assert_equal(expected, actual));
//}
///* ************************************************************************* */
//TEST( GaussianFactor, eliminateMatrix)
//TEST(GaussianFactor, eliminateMatrix)
//{
// Matrix Ab = Matrix_(3, 4,
// 1., 2., 0., 3.,
@ -847,10 +846,10 @@ TEST(GaussianFactor, permuteWithInverse)
// dimensions.insert(make_pair(_x2_, 1));
// dimensions.insert(make_pair(_x3_, 1));
//
// GaussianFactor::shared_ptr factor;
// JacobianFactor::shared_ptr factor;
// GaussianBayesNet bn;
// boost::tie(bn, factor) =
// GaussianFactor::eliminateMatrix(Ab, NULL, model, frontals, separator, dimensions);
// JacobianFactor::eliminateMatrix(Ab, NULL, model, frontals, separator, dimensions);
//
// GaussianBayesNet bn_expected;
// GaussianBayesNet::sharedConditional conditional1(new GaussianConditional(_x1_, Vector_(1, 6.), Matrix_(1, 1, 2.),
@ -861,7 +860,7 @@ TEST(GaussianFactor, permuteWithInverse)
// bn_expected.push_back(conditional2);
// EXPECT(assert_equal(bn_expected, bn));
//
// GaussianFactor factor_expected(_x3_, Matrix_(1, 1, 14.), Vector_(1, 16.), SharedDiagonal(Vector_(1, 1.)));
// JacobianFactor factor_expected(_x3_, Matrix_(1, 1, 14.), Vector_(1, 16.), SharedDiagonal(Vector_(1, 1.)));
// EXPECT(assert_equal(factor_expected, *factor));
//}

View File

@ -36,10 +36,10 @@ GaussianFactorGraph createChain() {
typedef GaussianFactorGraph::sharedFactor Factor;
SharedDiagonal model(Vector_(1, 0.5));
Factor factor1(new GaussianFactor(x2, Matrix_(1,1,1.), x1, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor2(new GaussianFactor(x2, Matrix_(1,1,1.), x3, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor3(new GaussianFactor(x3, Matrix_(1,1,1.), x4, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor4(new GaussianFactor(x4, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor1(new JacobianFactor(x2, Matrix_(1,1,1.), x1, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor2(new JacobianFactor(x2, Matrix_(1,1,1.), x3, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor3(new JacobianFactor(x3, Matrix_(1,1,1.), x4, Matrix_(1,1,1.), Vector_(1,1.), model));
Factor factor4(new JacobianFactor(x4, Matrix_(1,1,1.), Vector_(1,1.), model));
GaussianFactorGraph fg;
fg.push_back(factor1);

View File

@ -0,0 +1,210 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testCholeskyFactor.cpp
* @brief Unit tests for HessianFactor, including conversion from JacobianFactor and Cholesky elimination
* @author Richard Roberts
* @created Dec 15, 2010
*/
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <vector>
#include <utility>
#include <gtsam/base/TestableAssertions.h>
#include <CppUnitLite/TestHarness.h>
using namespace gtsam;
using namespace std;
/* ************************************************************************* */
TEST(HessianFactor, ConversionConstructor) {
HessianFactor expected;
expected.keys_.push_back(0);
expected.keys_.push_back(1);
size_t dims[] = { 2, 4, 1 };
expected.info_.resize(dims, dims+3, false);
expected.matrix_ = Matrix_(7,7,
125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000,
0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000,
-25.0000, 0.0, 25.0000, 0.0, 0.0, 0.0, -5.0000,
0.0, -25.0000, 0.0, 25.0000, 0.0, 0.0, 7.5000,
-100.0000, 0.0, 0.0, 0.0, 100.0000, 0.0, -20.0000,
0.0, -100.0000, 0.0, 0.0, 0.0, 100.0000, 10.0000,
25.0000, -17.5000, -5.0000, 7.5000, -20.0000, 10.0000, 8.2500);
// sigmas
double sigma1 = 0.2;
double sigma2 = 0.1;
Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
// the combined linear factor
Matrix Ax2 = Matrix_(4,2,
// x2
-1., 0.,
+0.,-1.,
1., 0.,
+0.,1.
);
Matrix Al1x1 = Matrix_(4,4,
// l1 x1
1., 0., 0.00, 0., // f4
0., 1., 0.00, 0., // f4
0., 0., -1., 0., // f2
0., 0., 0.00,-1. // f2
);
// the RHS
Vector b2(4);
b2(0) = -0.2;
b2(1) = 0.3;
b2(2) = 0.2;
b2(3) = -0.1;
vector<pair<Index, Matrix> > meas;
meas.push_back(make_pair(0, Ax2));
meas.push_back(make_pair(1, Al1x1));
JacobianFactor combined(meas, b2, sigmas);
HessianFactor actual(combined);
EXPECT(assert_equal(expected, actual, 1e-9));
}
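The expected 7x7 matrix above can be verified by hand: the conversion constructor builds the weighted augmented Gram matrix of the Jacobian factor. With W = diag(1/sigma_i^2), i.e. weights (25, 25, 100, 100) here,

  H = [A b]' W [A b] = [ A'WA  A'Wb ; b'WA  b'Wb ]

so for example H(0,0) = 25*(-1)^2 + 100*(1)^2 = 125, and the lower-right corner is b'Wb = 25*0.04 + 25*0.09 + 100*0.04 + 100*0.01 = 8.25, matching the entries in expected.matrix_.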
/* ************************************************************************* */
TEST_UNSAFE(GaussianFactor, CombineAndEliminate)
{
Matrix A01 = Matrix_(3,3,
1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0);
Vector b0 = Vector_(3, 1.5, 1.5, 1.5);
Vector s0 = Vector_(3, 1.6, 1.6, 1.6);
Matrix A10 = Matrix_(3,3,
2.0, 0.0, 0.0,
0.0, 2.0, 0.0,
0.0, 0.0, 2.0);
Matrix A11 = Matrix_(3,3,
-2.0, 0.0, 0.0,
0.0, -2.0, 0.0,
0.0, 0.0, -2.0);
Vector b1 = Vector_(3, 2.5, 2.5, 2.5);
Vector s1 = Vector_(3, 2.6, 2.6, 2.6);
Matrix A21 = Matrix_(3,3,
3.0, 0.0, 0.0,
0.0, 3.0, 0.0,
0.0, 0.0, 3.0);
Vector b2 = Vector_(3, 3.5, 3.5, 3.5);
Vector s2 = Vector_(3, 3.6, 3.6, 3.6);
GaussianFactorGraph gfg;
gfg.add(1, A01, b0, noiseModel::Diagonal::Sigmas(s0, true));
gfg.add(0, A10, 1, A11, b1, noiseModel::Diagonal::Sigmas(s1, true));
gfg.add(1, A21, b2, noiseModel::Diagonal::Sigmas(s2, true));
Matrix zero3x3 = zeros(3,3);
Matrix A0 = gtsam::stack(3, &A10, &zero3x3, &zero3x3);
Matrix A1 = gtsam::stack(3, &A11, &A01, &A21);
Vector b = gtsam::concatVectors(3, &b1, &b0, &b2);
Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2);
JacobianFactor expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
GaussianBayesNet expectedBN(*expectedFactor.eliminate(1));
pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> actualCholesky(HessianFactor::CombineAndEliminate(
*gfg.convertCastFactors<FactorGraph<HessianFactor> >(), 1));
EXPECT(assert_equal(expectedBN, *actualCholesky.first, 1e-6));
EXPECT(assert_equal(HessianFactor(expectedFactor), *actualCholesky.second, 1e-6));
}
/* ************************************************************************* */
TEST(GaussianFactor, eliminate2 )
{
// sigmas
double sigma1 = 0.2;
double sigma2 = 0.1;
Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
// the combined linear factor
Matrix Ax2 = Matrix_(4,2,
// x2
-1., 0.,
+0.,-1.,
1., 0.,
+0.,1.
);
Matrix Al1x1 = Matrix_(4,4,
// l1 x1
1., 0., 0.00, 0., // f4
0., 1., 0.00, 0., // f4
0., 0., -1., 0., // f2
0., 0., 0.00,-1. // f2
);
// the RHS
Vector b2(4);
b2(0) = -0.2;
b2(1) = 0.3;
b2(2) = 0.2;
b2(3) = -0.1;
vector<pair<Index, Matrix> > meas;
meas.push_back(make_pair(0, Ax2));
meas.push_back(make_pair(1, Al1x1));
JacobianFactor combined(meas, b2, sigmas);
// eliminate the combined factor
HessianFactor::shared_ptr combinedLF_Chol(new HessianFactor(combined));
FactorGraph<HessianFactor> combinedLFG_Chol;
combinedLFG_Chol.push_back(combinedLF_Chol);
pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> actual_Chol =
HessianFactor::CombineAndEliminate(combinedLFG_Chol, 1);
// create expected Conditional Gaussian
double oldSigma = 0.0894427; // from when R was made unit
Matrix R11 = Matrix_(2,2,
1.00, 0.00,
0.00, 1.00
)/oldSigma;
Matrix S12 = Matrix_(2,4,
-0.20, 0.00,-0.80, 0.00,
+0.00,-0.20,+0.00,-0.80
)/oldSigma;
Vector d = Vector_(2,0.2,-0.14)/oldSigma;
GaussianConditional expectedCG(0,d,R11,1,S12,ones(2));
EXPECT(assert_equal(expectedCG,*actual_Chol.first->front(),1e-4));
// the expected linear factor
double sigma = 0.2236;
Matrix Bl1x1 = Matrix_(2,4,
// l1 x1
1.00, 0.00, -1.00, 0.00,
0.00, 1.00, +0.00, -1.00
)/sigma;
Vector b1 = Vector_(2,0.0,0.894427);
JacobianFactor expectedLF(1, Bl1x1, b1, repeat(2,1.0));
EXPECT(assert_equal(HessianFactor(expectedLF), *actual_Chol.second, 1.5e-3));
}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */

View File

@ -27,7 +27,7 @@
using namespace gtsam;
using namespace std;
typedef EliminationTree<GaussianFactor> GaussianEliminationTree;
typedef EliminationTree<JacobianFactor> GaussianEliminationTree;
static boost::variate_generator<boost::mt19937, boost::uniform_real<> > rg(boost::mt19937(), boost::uniform_real<>(0.0, 1.0));
@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
Vector b(blockdim);
for(size_t j=0; j<blockdim; ++j)
b(j) = rg();
blockGfgs[trial].push_back(GaussianFactor::shared_ptr(new GaussianFactor(key, A, b, noise)));
blockGfgs[trial].push_back(JacobianFactor::shared_ptr(new JacobianFactor(key, A, b, noise)));
}
}
blockbuild = timer.elapsed();
@ -115,7 +115,7 @@ int main(int argc, char *argv[]) {
for(size_t j=0; j<blockdim; ++j)
bcomb(blockdim*i+j) = rg();
}
combGfgs[trial].push_back(GaussianFactor::shared_ptr(new GaussianFactor(key, Acomb, bcomb,
combGfgs[trial].push_back(JacobianFactor::shared_ptr(new JacobianFactor(key, Acomb, bcomb,
sharedSigma(blockdim*nBlocks, 1.0))));
}
combbuild = timer.elapsed();

View File

@ -11,7 +11,7 @@
/**
* @file timeGaussianFactor.cpp
* @brief time GaussianFactor.eliminate
* @brief time JacobianFactor.eliminate
* @author Alireza Fathi
*/
@ -27,8 +27,10 @@ using namespace std;
#include <boost/assign/std/list.hpp> // for operator += in Ordering
#include <gtsam/base/Matrix.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/SharedDiagonal.h>
using namespace gtsam;
using namespace boost::assign;
@ -101,14 +103,14 @@ int main()
b2(7) = -1;
// time eliminate
GaussianFactor combined(_x2_, Ax2, _l1_, Al1, _x1_, Ax1, b2,sharedSigma(8,1));
JacobianFactor combined(_x2_, Ax2, _l1_, Al1, _x1_, Ax1, b2, sharedSigma(8,1));
long timeLog = clock();
int n = 1000000;
GaussianConditional::shared_ptr conditional;
GaussianFactor::shared_ptr factor;
JacobianFactor::shared_ptr factor;
for(int i = 0; i < n; i++)
conditional = GaussianFactor(combined).eliminateFirst();
conditional = JacobianFactor(combined).eliminateFirst();
long timeLog2 = clock();
double seconds = (double)(timeLog2-timeLog)/CLOCKS_PER_SEC;

View File

@ -31,7 +31,7 @@ using namespace gtsam;
using namespace std;
using namespace boost::lambda;
typedef EliminationTree<GaussianFactor> GaussianEliminationTree;
typedef EliminationTree<JacobianFactor> GaussianEliminationTree;
static boost::variate_generator<boost::mt19937, boost::uniform_real<> > rg(boost::mt19937(), boost::uniform_real<>(0.0, 1.0));
@ -103,7 +103,7 @@ int main(int argc, char *argv[]) {
for(size_t j=0; j<blockdim; ++j)
b(j) = rg();
if(!terms.empty())
blockGfgs[trial].push_back(GaussianFactor::shared_ptr(new GaussianFactor(terms, b, noise)));
blockGfgs[trial].push_back(JacobianFactor::shared_ptr(new JacobianFactor(terms, b, noise)));
}
}

View File

@ -125,13 +125,13 @@ namespace gtsam {
}
// Linearize is overridden, because the base linearization tries to whiten
virtual boost::shared_ptr<GaussianFactor> linearize(const VALUES& x, const Ordering& ordering) const {
virtual JacobianFactor::shared_ptr linearize(const VALUES& x, const Ordering& ordering) const {
const T& xj = x[this->key_];
Matrix A;
Vector b = evaluateError(xj, A);
// TODO pass unwhitened + noise model to Gaussian factor
SharedDiagonal model = noiseModel::Constrained::All(b.size());
return GaussianFactor::shared_ptr(new GaussianFactor(ordering[this->key_], A, b, model));
return JacobianFactor::shared_ptr(new JacobianFactor(ordering[this->key_], A, b, model));
}
}; // NonlinearEquality

View File

@ -31,6 +31,8 @@
#include <gtsam/base/Matrix.h>
#include <gtsam/linear/SharedGaussian.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/nonlinear/Ordering.h>
// FIXME: is this necessary? These don't even fit the right format
@ -131,7 +133,7 @@ namespace gtsam {
}
/** linearize to a GaussianFactor */
virtual boost::shared_ptr<GaussianFactor>
virtual boost::shared_ptr<JacobianFactor>
linearize(const VALUES& c, const Ordering& ordering) const = 0;
/**
@ -221,7 +223,7 @@ namespace gtsam {
* Ax-b \approx h(x0+dx)-z = h(x0) + A*dx - z
* Hence b = z - h(x0) = - error_vector(x)
*/
virtual boost::shared_ptr<GaussianFactor> linearize(const VALUES& x, const Ordering& ordering) const {
virtual boost::shared_ptr<JacobianFactor> linearize(const VALUES& x, const Ordering& ordering) const {
const X& xj = x[key_];
Matrix A;
Vector b = - evaluateError(xj, A);
@ -230,10 +232,10 @@ namespace gtsam {
SharedDiagonal constrained =
boost::shared_dynamic_cast<noiseModel::Constrained>(this->noiseModel_);
if (constrained.get() != NULL)
return GaussianFactor::shared_ptr(new GaussianFactor(var, A, b, constrained));
return JacobianFactor::shared_ptr(new JacobianFactor(var, A, b, constrained));
this->noiseModel_->WhitenInPlace(A);
this->noiseModel_->whitenInPlace(b);
return GaussianFactor::shared_ptr(new GaussianFactor(var, A, b,
return JacobianFactor::shared_ptr(new JacobianFactor(var, A, b,
noiseModel::Unit::Create(b.size())));
}
@ -333,7 +335,7 @@ namespace gtsam {
* Ax-b \approx h(x1+dx1,x2+dx2)-z = h(x1,x2) + A2*dx1 + A2*dx2 - z
* Hence b = z - h(x1,x2) = - error_vector(x)
*/
boost::shared_ptr<GaussianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
boost::shared_ptr<JacobianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
const X1& x1 = c[key1_];
const X2& x2 = c[key2_];
Matrix A1, A2;
@ -343,17 +345,17 @@ namespace gtsam {
SharedDiagonal constrained =
boost::shared_dynamic_cast<noiseModel::Constrained>(this->noiseModel_);
if (constrained.get() != NULL) {
return GaussianFactor::shared_ptr(new GaussianFactor(var1, A1, var2,
return JacobianFactor::shared_ptr(new JacobianFactor(var1, A1, var2,
A2, b, constrained));
}
this->noiseModel_->WhitenInPlace(A1);
this->noiseModel_->WhitenInPlace(A2);
this->noiseModel_->whitenInPlace(b);
if(var1 < var2)
return GaussianFactor::shared_ptr(new GaussianFactor(var1, A1, var2,
return JacobianFactor::shared_ptr(new JacobianFactor(var1, A1, var2,
A2, b, noiseModel::Unit::Create(b.size())));
else
return GaussianFactor::shared_ptr(new GaussianFactor(var2, A2, var1,
return JacobianFactor::shared_ptr(new JacobianFactor(var2, A2, var1,
A1, b, noiseModel::Unit::Create(b.size())));
}
@ -474,7 +476,7 @@ namespace gtsam {
* Ax-b \approx h(x1+dx1,x2+dx2,x3+dx3)-z = h(x1,x2,x3) + A2*dx1 + A2*dx2 + A3*dx3 - z
* Hence b = z - h(x1,x2,x3) = - error_vector(x)
*/
boost::shared_ptr<GaussianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
boost::shared_ptr<JacobianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
const X1& x1 = c[key1_];
const X2& x2 = c[key2_];
const X3& x3 = c[key3_];
@ -485,34 +487,34 @@ namespace gtsam {
SharedDiagonal constrained =
boost::shared_dynamic_cast<noiseModel::Constrained>(this->noiseModel_);
if (constrained.get() != NULL) {
return GaussianFactor::shared_ptr(
new GaussianFactor(var1, A1, var2, A2, var3, A3, b, constrained));
return JacobianFactor::shared_ptr(
new JacobianFactor(var1, A1, var2, A2, var3, A3, b, constrained));
}
this->noiseModel_->WhitenInPlace(A1);
this->noiseModel_->WhitenInPlace(A2);
this->noiseModel_->WhitenInPlace(A3);
this->noiseModel_->whitenInPlace(b);
if(var1 < var2 && var2 < var3)
return GaussianFactor::shared_ptr(
new GaussianFactor(var1, A1, var2, A2, var3, A3, b, noiseModel::Unit::Create(b.size())));
return JacobianFactor::shared_ptr(
new JacobianFactor(var1, A1, var2, A2, var3, A3, b, noiseModel::Unit::Create(b.size())));
else if(var2 < var1 && var1 < var3)
return GaussianFactor::shared_ptr(
new GaussianFactor(var2, A2, var1, A1, var3, A3, b, noiseModel::Unit::Create(b.size())));
return JacobianFactor::shared_ptr(
new JacobianFactor(var2, A2, var1, A1, var3, A3, b, noiseModel::Unit::Create(b.size())));
else if(var1 < var3 && var3 < var2)
return GaussianFactor::shared_ptr(
new GaussianFactor(var1, A1, var3, A3, var2, A2, b, noiseModel::Unit::Create(b.size())));
return JacobianFactor::shared_ptr(
new JacobianFactor(var1, A1, var3, A3, var2, A2, b, noiseModel::Unit::Create(b.size())));
else if(var2 < var3 && var3 < var1)
return GaussianFactor::shared_ptr(
new GaussianFactor(var2, A2, var3, A3, var1, A1, b, noiseModel::Unit::Create(b.size())));
return JacobianFactor::shared_ptr(
new JacobianFactor(var2, A2, var3, A3, var1, A1, b, noiseModel::Unit::Create(b.size())));
else if(var3 < var1 && var1 < var2)
return GaussianFactor::shared_ptr(
new GaussianFactor(var3, A3, var1, A1, var2, A2, b, noiseModel::Unit::Create(b.size())));
return JacobianFactor::shared_ptr(
new JacobianFactor(var3, A3, var1, A1, var2, A2, b, noiseModel::Unit::Create(b.size())));
else if(var3 < var2 && var2 < var1)
return GaussianFactor::shared_ptr(
new GaussianFactor(var3, A3, var2, A2, var1, A1, b, noiseModel::Unit::Create(b.size())));
return JacobianFactor::shared_ptr(
new JacobianFactor(var3, A3, var2, A2, var1, A1, b, noiseModel::Unit::Create(b.size())));
else {
assert(false);
return GaussianFactor::shared_ptr();
return JacobianFactor::shared_ptr();
}
}
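The six-way branch above exists only because the JacobianFactor constructors expect their (index, matrix) terms in ascending variable order. A hypothetical equivalent sketch using the terms-list constructor exercised in the unit tests (a std::map orders the terms by variable index automatically):

std::map<Index, Matrix> sorted;
sorted[var1] = A1; sorted[var2] = A2; sorted[var3] = A3;
std::list<std::pair<Index, Matrix> > terms(sorted.begin(), sorted.end());
return JacobianFactor::shared_ptr(
    new JacobianFactor(terms, b, noiseModel::Unit::Create(b.size())));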

View File

@ -120,16 +120,16 @@ void NonlinearFactorGraph<VALUES>::print(const std::string& str) const {
/* ************************************************************************* */
template<class VALUES>
boost::shared_ptr<GaussianFactorGraph> NonlinearFactorGraph<VALUES>::linearize(
typename FactorGraph<JacobianFactor>::shared_ptr NonlinearFactorGraph<VALUES>::linearize(
const VALUES& config, const Ordering& ordering) const {
// create an empty linear FG
GaussianFactorGraph::shared_ptr linearFG(new GaussianFactorGraph);
typename FactorGraph<JacobianFactor>::shared_ptr linearFG(new FactorGraph<JacobianFactor>);
linearFG->reserve(this->size());
// linearize all factors
BOOST_FOREACH(const sharedFactor& factor, this->factors_) {
boost::shared_ptr<GaussianFactor> lf = factor->linearize(config, ordering);
JacobianFactor::shared_ptr lf = factor->linearize(config, ordering);
if (lf) linearFG->push_back(lf);
}

View File

@ -93,7 +93,7 @@ namespace gtsam {
/**
* linearize a nonlinear factor graph
*/
boost::shared_ptr<GaussianFactorGraph>
boost::shared_ptr<FactorGraph<JacobianFactor> >
linearize(const VALUES& config, const Ordering& ordering) const;

View File

@ -51,7 +51,8 @@ void NonlinearISAM<Values>::update(const Factors& newFactors, const Values& init
}
}
boost::shared_ptr<GaussianFactorGraph> linearizedNewFactors(newFactors.linearize(linPoint_, ordering_));
boost::shared_ptr<GaussianFactorGraph> linearizedNewFactors(
newFactors.linearize(linPoint_, ordering_)->template dynamicCastFactors<GaussianFactorGraph>());
// Update ISAM
isam_.update(*linearizedNewFactors);
@ -73,7 +74,8 @@ void NonlinearISAM<Values>::reorder_relinearize() {
ordering_ = *factors_.orderingCOLAMD(newLinPoint);
// Create a linear factor graph at the new linearization point
boost::shared_ptr<GaussianFactorGraph> gfg(factors_.linearize(newLinPoint, ordering_));
boost::shared_ptr<GaussianFactorGraph> gfg(
factors_.linearize(newLinPoint, ordering_)->template dynamicCastFactors<GaussianFactorGraph>());
// Just recreate the whole BayesTree
isam_.update(*gfg);

View File

@ -71,7 +71,7 @@ namespace gtsam {
/* ************************************************************************* */
template<class G, class C, class L, class S, class W>
VectorValues NonlinearOptimizer<G, C, L, S, W>::linearizeAndOptimizeForDelta() const {
boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
// NonlinearOptimizer prepared(graph_, values_, ordering_, error_, lambda_);
return *S(*linearized).optimize();
}
@ -84,7 +84,7 @@ namespace gtsam {
NonlinearOptimizer<G, C, L, S, W> NonlinearOptimizer<G, C, L, S, W>::iterate() const {
Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
shared_solver newSolver = solver_;
if(newSolver) newSolver->replaceFactors(linearized);
@ -167,7 +167,7 @@ namespace gtsam {
Matrix A = eye(dim);
Vector b = zero(dim);
SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
GaussianFactor::shared_ptr prior(new GaussianFactor(j, A, b, model));
GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
damped->push_back(prior);
}
}
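These damping priors are the Levenberg-Marquardt trick: a unit Jacobian A = I with b = 0 and isotropic sigma adds a quadratic penalty 0.5*(1/sigma^2)*|dx_j|^2 around the current linearization point, so the damped normal equations become (A'A + (1/sigma^2)*I) dx = A'b. Choosing sigma = 1/sqrt(lambda), which this code presumably does upstream of this hunk, recovers the classic (A'A + lambda*I) dx = A'b.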
@ -225,7 +225,7 @@ namespace gtsam {
if (verbosity >= Parameters::LAMBDA) cout << "lambda = " << lambda << endl;
// linearize all factors once
boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_);
boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
if (verbosity >= Parameters::LINEAR) linear->print("linear");

View File

@ -134,7 +134,7 @@ namespace example {
}
/* ************************************************************************* */
GaussianFactorGraph createGaussianFactorGraph(const Ordering& ordering) {
FactorGraph<JacobianFactor> createGaussianFactorGraph(const Ordering& ordering) {
Matrix I = eye(2);
// Create empty graph
@ -273,7 +273,7 @@ namespace example {
}
/* ************************************************************************* */
pair<GaussianFactorGraph, Ordering> createSmoother(int T, boost::optional<Ordering> ordering) {
pair<FactorGraph<JacobianFactor>, Ordering> createSmoother(int T, boost::optional<Ordering> ordering) {
Graph nlfg;
Values poses;
boost::tie(nlfg, poses) = createNonlinearSmoother(T);
@ -283,14 +283,14 @@ namespace example {
}
/* ************************************************************************* */
GaussianFactorGraph createSimpleConstraintGraph() {
FactorGraph<JacobianFactor> createSimpleConstraintGraph() {
// create unary factor
// prior on _x_, mean = [1,-1], sigma=0.1
Matrix Ax = eye(2);
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
GaussianFactor::shared_ptr f1(new GaussianFactor(_x_, Ax, b1, sigma0_1));
GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
// create binary constraint factor
// between _x_ and _y_, that is going to be the only factor on _y_
@ -299,11 +299,11 @@ namespace example {
Matrix Ax1 = eye(2);
Matrix Ay1 = eye(2) * -1;
Vector b2 = Vector_(2, 0.0, 0.0);
GaussianFactor::shared_ptr f2(new GaussianFactor(_x_, Ax1, _y_, Ay1, b2,
GaussianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
constraintModel));
// construct the graph
GaussianFactorGraph fg;
FactorGraph<JacobianFactor> fg;
fg.push_back(f1);
fg.push_back(f2);
@ -320,15 +320,15 @@ namespace example {
}
/* ************************************************************************* */
GaussianFactorGraph createSingleConstraintGraph() {
FactorGraph<JacobianFactor> createSingleConstraintGraph() {
// create unary factor
// prior on _x_, mean = [1,-1], sigma=0.1
Matrix Ax = eye(2);
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
//GaussianFactor::shared_ptr f1(new GaussianFactor(_x_, sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
GaussianFactor::shared_ptr f1(new GaussianFactor(_x_, Ax, b1, sigma0_1));
//GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
// create binary constraint factor
// between _x_ and _y_, that is going to be the only factor on _y_
@ -341,11 +341,11 @@ namespace example {
Ax1(1, 1) = 1.0;
Matrix Ay1 = eye(2) * 10;
Vector b2 = Vector_(2, 1.0, 2.0);
GaussianFactor::shared_ptr f2(new GaussianFactor(_x_, Ax1, _y_, Ay1, b2,
GaussianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
constraintModel));
// construct the graph
GaussianFactorGraph fg;
FactorGraph<JacobianFactor> fg;
fg.push_back(f1);
fg.push_back(f2);
@ -361,11 +361,11 @@ namespace example {
}
/* ************************************************************************* */
GaussianFactorGraph createMultiConstraintGraph() {
FactorGraph<JacobianFactor> createMultiConstraintGraph() {
// unary factor 1
Matrix A = eye(2);
Vector b = Vector_(2, -2.0, 2.0);
GaussianFactor::shared_ptr lf1(new GaussianFactor(_x_, A, b, sigma0_1));
GaussianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, sigma0_1));
// constraint 1
Matrix A11(2, 2);
@ -383,7 +383,7 @@ namespace example {
Vector b1(2);
b1(0) = 1.0;
b1(1) = 2.0;
GaussianFactor::shared_ptr lc1(new GaussianFactor(_x_, A11, _y_, A12, b1,
GaussianFactor::shared_ptr lc1(new JacobianFactor(_x_, A11, _y_, A12, b1,
constraintModel));
// constraint 2
@ -402,11 +402,11 @@ namespace example {
Vector b2(2);
b2(0) = 3.0;
b2(1) = 4.0;
GaussianFactor::shared_ptr lc2(new GaussianFactor(_x_, A21, _z_, A22, b2,
GaussianFactor::shared_ptr lc2(new JacobianFactor(_x_, A21, _z_, A22, b2,
constraintModel));
// construct the graph
GaussianFactorGraph fg;
FactorGraph<JacobianFactor> fg;
fg.push_back(lf1);
fg.push_back(lc1);
fg.push_back(lc2);
@ -431,7 +431,7 @@ namespace example {
}
/* ************************************************************************* */
boost::tuple<GaussianFactorGraph, Ordering, VectorValues> planarGraph(size_t N) {
boost::tuple<FactorGraph<JacobianFactor>, Ordering, VectorValues> planarGraph(size_t N) {
// create empty graph
NonlinearFactorGraph<Values> nlfg;
@ -481,9 +481,9 @@ namespace example {
}
/* ************************************************************************* */
pair<GaussianFactorGraph, GaussianFactorGraph> splitOffPlanarTree(size_t N,
const GaussianFactorGraph& original) {
GaussianFactorGraph T, C;
pair<FactorGraph<JacobianFactor>, FactorGraph<JacobianFactor> > splitOffPlanarTree(size_t N,
const FactorGraph<JacobianFactor>& original) {
FactorGraph<JacobianFactor> T, C;
// Add the x11 constraint to the tree
T.push_back(original[0]);

View File

@ -69,7 +69,7 @@ namespace gtsam {
* create a linear factor graph:
* the non-linear graph above, evaluated at NoisyValues
*/
GaussianFactorGraph createGaussianFactorGraph(const Ordering& ordering);
FactorGraph<JacobianFactor> createGaussianFactorGraph(const Ordering& ordering);
/**
* create small Chordal Bayes Net x <- y
@ -93,7 +93,7 @@ namespace gtsam {
* Create a Kalman smoother by linearizing a non-linear factor graph
* @param T number of time-steps
*/
std::pair<GaussianFactorGraph, Ordering> createSmoother(int T, boost::optional<Ordering> ordering = boost::none);
std::pair<FactorGraph<JacobianFactor>, Ordering> createSmoother(int T, boost::optional<Ordering> ordering = boost::none);
/* ******************************************************* */
// Linear Constrained Examples
@ -103,21 +103,21 @@ namespace gtsam {
* Creates a simple constrained graph with one linear factor and
* one binary equality constraint that sets x = y
*/
GaussianFactorGraph createSimpleConstraintGraph();
FactorGraph<JacobianFactor> createSimpleConstraintGraph();
VectorValues createSimpleConstraintValues();
/**
* Creates a simple constrained graph with one linear factor and
* one binary constraint.
*/
GaussianFactorGraph createSingleConstraintGraph();
FactorGraph<JacobianFactor> createSingleConstraintGraph();
VectorValues createSingleConstraintValues();
/**
* Creates a constrained graph with a linear factor and two
* binary constraints that share a node
*/
GaussianFactorGraph createMultiConstraintGraph();
FactorGraph<JacobianFactor> createMultiConstraintGraph();
VectorValues createMultiConstraintValues();
/* ******************************************************* */
@ -133,7 +133,7 @@ namespace gtsam {
* -x11-x21-x31
* with x11 clamped at (1,1), and others related by 2D odometry.
*/
boost::tuple<GaussianFactorGraph, Ordering, VectorValues> planarGraph(size_t N);
boost::tuple<FactorGraph<JacobianFactor>, Ordering, VectorValues> planarGraph(size_t N);
/*
* Create canonical ordering for planar graph that also works for tree
@ -150,8 +150,8 @@ namespace gtsam {
* |
* -x11-x21-x31
*/
std::pair<GaussianFactorGraph, GaussianFactorGraph> splitOffPlanarTree(
size_t N, const GaussianFactorGraph& original);
std::pair<FactorGraph<JacobianFactor>, FactorGraph<JacobianFactor> > splitOffPlanarTree(
size_t N, const FactorGraph<JacobianFactor>& original);
} // example
} // gtsam
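
Editor's note: these helpers now return FactorGraph<JacobianFactor> rather than GaussianFactorGraph. Building such a graph by hand follows the same pattern as in smallExample.cpp; a minimal sketch (the key x and the noise model are illustrative):

    FactorGraph<JacobianFactor> fg;
    Index x = 0;                                     // assumed variable index
    Matrix A = eye(2);
    Vector b = Vector_(2, 1.0, -1.0);
    SharedDiagonal model = noiseModel::Isotropic::Sigma(2, 0.1);
    JacobianFactor::shared_ptr f(new JacobianFactor(x, A, b, model));
    fg.push_back(f);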

View File

@ -52,7 +52,7 @@ TEST( Pose2Factor, error )
// Actual linearization
Ordering ordering(*x0.orderingArbitrary());
boost::shared_ptr<GaussianFactor> linear = factor.linearize(x0, ordering);
boost::shared_ptr<JacobianFactor> linear = factor.linearize(x0, ordering);
// Check error at x0, i.e. delta = zero!
VectorValues delta(x0.dims(ordering));
@ -60,7 +60,7 @@ TEST( Pose2Factor, error )
delta[ordering["x2"]] = zero(3);
Vector error_at_zero = Vector_(3,0.0,0.0,0.0);
CHECK(assert_equal(error_at_zero,factor.unwhitenedError(x0)));
CHECK(assert_equal(-error_at_zero,linear->error_vector(delta)));
CHECK(assert_equal(-error_at_zero, linear->error_vector(delta)));
// Check error after increasing p2
VectorValues plus = delta;
@ -88,7 +88,7 @@ TEST( Pose2Factor, rhs )
// Actual linearization
Ordering ordering(*x0.orderingArbitrary());
boost::shared_ptr<GaussianFactor> linear = factor.linearize(x0, ordering);
boost::shared_ptr<JacobianFactor> linear = factor.linearize(x0, ordering);
// Check RHS
Pose2 hx0 = p1.between(p2);
@ -131,10 +131,10 @@ TEST( Pose2Factor, linearize )
// expected linear factor
Ordering ordering(*x0.orderingArbitrary());
SharedDiagonal probModel1 = noiseModel::Unit::Create(3);
GaussianFactor expected(ordering["x1"], expectedH1, ordering["x2"], expectedH2, expected_b, probModel1);
JacobianFactor expected(ordering["x1"], expectedH1, ordering["x2"], expectedH2, expected_b, probModel1);
// Actual linearization
boost::shared_ptr<GaussianFactor> actual = factor.linearize(x0, ordering);
boost::shared_ptr<JacobianFactor> actual = factor.linearize(x0, ordering);
CHECK(assert_equal(expected,*actual));
// Numerical derivatives do not work out because BetweenFactor is approximate?

View File

@ -44,7 +44,7 @@ TEST( Pose2Prior, error )
// Actual linearization
Ordering ordering(*x0.orderingArbitrary());
boost::shared_ptr<GaussianFactor> linear = factor.linearize(x0, ordering);
boost::shared_ptr<JacobianFactor> linear = factor.linearize(x0, ordering);
// Check error at x0, i.e. delta = zero!
VectorValues delta(x0.dims(ordering));
@ -86,7 +86,7 @@ TEST( Pose2Prior, linearize )
// Actual linearization
Ordering ordering(*x0.orderingArbitrary());
boost::shared_ptr<GaussianFactor> actual = factor.linearize(x0, ordering);
boost::shared_ptr<JacobianFactor> actual = factor.linearize(x0, ordering);
// Test with numerical derivative
Matrix numericalH = numericalDerivative11(h, prior, 1e-5);

View File

@ -74,11 +74,11 @@ TEST( Pose2Graph, linearization )
config.insert(2,p2);
// Linearize
Ordering ordering(*config.orderingArbitrary());
boost::shared_ptr<GaussianFactorGraph> lfg_linearized = graph.linearize(config, ordering);
boost::shared_ptr<FactorGraph<JacobianFactor> > lfg_linearized = graph.linearize(config, ordering);
//lfg_linearized->print("lfg_actual");
// the expected linear factor
GaussianFactorGraph lfg_expected;
FactorGraph<JacobianFactor> lfg_expected;
Matrix A1 = Matrix_(3,3,
0.0,-2.0, -4.2,
2.0, 0.0, -4.2,
@ -91,7 +91,7 @@ TEST( Pose2Graph, linearization )
Vector b = Vector_(3,-0.1/sx,0.1/sy,0.0);
SharedDiagonal probModel1 = noiseModel::Unit::Create(3);
lfg_expected.add(ordering["x1"], A1, ordering["x2"], A2, b, probModel1);
lfg_expected.push_back(JacobianFactor::shared_ptr(new JacobianFactor(ordering["x1"], A1, ordering["x2"], A2, b, probModel1)));
CHECK(assert_equal(lfg_expected, *lfg_linearized));
}
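
Editor's note: the GaussianFactorGraph::add(...) convenience is gone for FactorGraph<JacobianFactor>, so the expected factor is now pushed explicitly, as in the hunk above:

    lfg_expected.push_back(JacobianFactor::shared_ptr(
        new JacobianFactor(ordering["x1"], A1, ordering["x2"], A2, b, probModel1)));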

View File

@ -17,6 +17,7 @@
#define GTSAM_MAGIC_KEY
#include <gtsam/inference/FactorGraph-inl.h>
#include <gtsam/slam/visualSLAM.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
@ -65,16 +66,16 @@ TEST( ProjectionFactor, error )
Matrix Al1 = Matrix_(2, 3, 61.584, 0., 0., 0., 61.584, 0.);
Vector b = Vector_(2,3.,0.);
SharedDiagonal probModel1 = noiseModel::Unit::Create(2);
GaussianFactor expected(ordering["x1"], Ax1, ordering["l1"], Al1, b, probModel1);
GaussianFactor::shared_ptr actual = factor->linearize(config, ordering);
JacobianFactor expected(ordering["x1"], Ax1, ordering["l1"], Al1, b, probModel1);
JacobianFactor::shared_ptr actual = factor->linearize(config, ordering);
CHECK(assert_equal(expected,*actual,1e-3));
// linearize graph
Graph graph;
graph.push_back(factor);
GaussianFactorGraph expected_lfg;
FactorGraph<JacobianFactor> expected_lfg;
expected_lfg.push_back(actual);
boost::shared_ptr<GaussianFactorGraph> actual_lfg = graph.linearize(config, ordering);
boost::shared_ptr<FactorGraph<JacobianFactor> > actual_lfg = graph.linearize(config, ordering);
CHECK(assert_equal(expected_lfg,*actual_lfg));
// expmap on a config

View File

@ -49,13 +49,13 @@ TEST( GaussianFactor, linearFactor )
Matrix I = eye(2);
Vector b = Vector_(2, 2.0, -1.0);
GaussianFactor expected(ordering["x1"], -10*I,ordering["x2"], 10*I, b, noiseModel::Unit::Create(2));
JacobianFactor expected(ordering["x1"], -10*I,ordering["x2"], 10*I, b, noiseModel::Unit::Create(2));
// create a small linear factor graph
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
// get the factor "f2" from the factor graph
GaussianFactor::shared_ptr lf = fg[1];
JacobianFactor::shared_ptr lf = fg[1];
// check if the two factors are the same
CHECK(assert_equal(expected,*lf));
@ -225,7 +225,7 @@ TEST( GaussianFactor, matrix )
{
// create a small linear factor graph
Ordering ordering; ordering += "x1","x2","l1";
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
// get the factor "f2" from the factor graph
//GaussianFactor::shared_ptr lf = fg[1]; // NOTE: using the older version
@ -234,7 +234,7 @@ TEST( GaussianFactor, matrix )
// render with a given ordering
Ordering ord;
ord += "x1","x2";
GaussianFactor::shared_ptr lf(new GaussianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
JacobianFactor::shared_ptr lf(new JacobianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
// Test whitened version
Matrix A_act1; Vector b_act1;
@ -273,7 +273,7 @@ TEST( GaussianFactor, matrix_aug )
{
// create a small linear factor graph
Ordering ordering; ordering += "x1","x2","l1";
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
// get the factor "f2" from the factor graph
//GaussianFactor::shared_ptr lf = fg[1];
@ -282,7 +282,7 @@ TEST( GaussianFactor, matrix_aug )
// render with a given ordering
Ordering ord;
ord += "x1","x2";
GaussianFactor::shared_ptr lf(new GaussianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
JacobianFactor::shared_ptr lf(new JacobianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
// Test unwhitened version

View File

@ -58,13 +58,13 @@ TEST( GaussianFactorGraph, equals ){
TEST( GaussianFactorGraph, error )
{
Ordering ordering; ordering += "x1","x2","l1";
GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
VectorValues cfg = createZeroDelta(ordering);
// note: the error is the same as in testNonlinearFactorGraph, because a
// zero-delta config in the linear graph is equivalent to the noisy values
// in the nonlinear graph, which is really linear under the hood
double actual = fg.error(cfg);
double actual = gaussianError(fg, cfg);
DOUBLES_EQUAL( 5.625, actual, 1e-9 );
}
@ -349,9 +349,9 @@ TEST( GaussianFactorGraph, eliminateAll )
// Matrix A = eye(2);
// Vector b = zero(2);
// SharedDiagonal sigma = sharedSigma(2,3.0);
// expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor(ordering["l1"],A,b,sigma)));
// expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor(ordering["x1"],A,b,sigma)));
// expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor(ordering["x2"],A,b,sigma)));
// expected.push_back(GaussianFactor::shared_ptr(new JacobianFactor(ordering["l1"],A,b,sigma)));
// expected.push_back(GaussianFactor::shared_ptr(new JacobianFactor(ordering["x1"],A,b,sigma)));
// expected.push_back(GaussianFactor::shared_ptr(new JacobianFactor(ordering["x2"],A,b,sigma)));
// CHECK(assert_equal(expected,actual));
//}
@ -650,7 +650,7 @@ double error(const VectorValues& x) {
Ordering ord; ord += "x2","l1","x1";
GaussianFactorGraph fg = createGaussianFactorGraph(ord);
return fg.error(x);
return gaussianError(fg,x);
}
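
Editor's note: FactorGraph<JacobianFactor> has no error() member, so these tests switch to the free function gaussianError(). A minimal sketch of the replacement, reusing the helpers above:

    Ordering ordering; ordering += "x1","x2","l1";
    FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
    VectorValues delta = createZeroDelta(ordering);
    double e = gaussianError(fg, delta);   // was: fg.error(delta)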
///* ************************************************************************* */
@ -899,13 +899,13 @@ TEST(GaussianFactorGraph, replace)
Ordering ord; ord += "x1","x2","x3","x4","x5","x6";
SharedDiagonal noise(sharedSigma(3, 1.0));
GaussianFactorGraph::sharedFactor f1(new GaussianFactor(
GaussianFactorGraph::sharedFactor f1(new JacobianFactor(
ord["x1"], eye(3,3), ord["x2"], eye(3,3), zero(3), noise));
GaussianFactorGraph::sharedFactor f2(new GaussianFactor(
GaussianFactorGraph::sharedFactor f2(new JacobianFactor(
ord["x2"], eye(3,3), ord["x3"], eye(3,3), zero(3), noise));
GaussianFactorGraph::sharedFactor f3(new GaussianFactor(
GaussianFactorGraph::sharedFactor f3(new JacobianFactor(
ord["x3"], eye(3,3), ord["x4"], eye(3,3), zero(3), noise));
GaussianFactorGraph::sharedFactor f4(new GaussianFactor(
GaussianFactorGraph::sharedFactor f4(new JacobianFactor(
ord["x5"], eye(3,3), ord["x6"], eye(3,3), zero(3), noise));
GaussianFactorGraph actual;
@ -970,9 +970,9 @@ TEST(GaussianFactorGraph, replace)
// typedef GaussianFactorGraph::sharedFactor Factor;
// SharedDiagonal model(Vector_(1, 0.5));
// GaussianFactorGraph fg;
// Factor factor1(new GaussianFactor("x1", Matrix_(1,1,1.), "x2", Matrix_(1,1,1.), Vector_(1,1.), model));
// Factor factor2(new GaussianFactor("x2", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model));
// Factor factor3(new GaussianFactor("x3", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model));
// Factor factor1(new JacobianFactor("x1", Matrix_(1,1,1.), "x2", Matrix_(1,1,1.), Vector_(1,1.), model));
// Factor factor2(new JacobianFactor("x2", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model));
// Factor factor3(new JacobianFactor("x3", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model));
// fg.push_back(factor1);
// fg.push_back(factor2);
// fg.push_back(factor3);
@ -989,7 +989,7 @@ TEST(GaussianFactorGraph, replace)
// bn_expected.push_back(conditional2);
// CHECK(assert_equal(bn_expected, bn));
//
// GaussianFactorGraph::sharedFactor factor_expected(new GaussianFactor("x3", Matrix_(1, 1, 2.), Vector_(1, 2.), SharedDiagonal(Vector_(1, 1.))));
// GaussianFactorGraph::sharedFactor factor_expected(new JacobianFactor("x3", Matrix_(1, 1, 2.), Vector_(1, 2.), SharedDiagonal(Vector_(1, 1.))));
// GaussianFactorGraph fg_expected;
// fg_expected.push_back(factor_expected);
// CHECK(assert_equal(fg_expected, fg));

View File

@ -85,7 +85,7 @@ TEST( Inference, marginals2)
init.insert(PointKey(0), Point2(1.0,1.0));
Ordering ordering(*fg.orderingCOLAMD(init));
GaussianFactorGraph::shared_ptr gfg(fg.linearize(init, ordering));
FactorGraph<JacobianFactor>::shared_ptr gfg(fg.linearize(init, ordering));
GaussianMultifrontalSolver solver(*gfg);
solver.marginalFactor(ordering[PointKey(0)]);
}

View File

@ -49,8 +49,8 @@ TEST ( NonlinearEquality, linearization ) {
// check linearize
SharedDiagonal constraintModel = noiseModel::Constrained::All(3);
GaussianFactor expLF(0, eye(3), zero(3), constraintModel);
GaussianFactor::shared_ptr actualLF = nle->linearize(linearize, *linearize.orderingArbitrary());
JacobianFactor expLF(0, eye(3), zero(3), constraintModel);
JacobianFactor::shared_ptr actualLF = nle->linearize(linearize, *linearize.orderingArbitrary());
EXPECT(assert_equal(*actualLF, expLF));
}
@ -174,7 +174,7 @@ TEST ( NonlinearEquality, allow_error_pose ) {
Matrix A1 = eye(3,3);
Vector b = expVec;
SharedDiagonal model = noiseModel::Constrained::All(3);
GaussianFactor::shared_ptr expLinFactor(new GaussianFactor(0, A1, b, model));
GaussianFactor::shared_ptr expLinFactor(new JacobianFactor(0, A1, b, model));
EXPECT(assert_equal(*expLinFactor, *actLinFactor, 1e-5));
}

View File

@ -200,12 +200,12 @@ TEST( NonlinearFactor, linearize_constraint1 )
Values config;
config.insert(simulated2D::PoseKey(1), Point2(1.0, 2.0));
GaussianFactor::shared_ptr actual = f0->linearize(config, *config.orderingArbitrary());
JacobianFactor::shared_ptr actual = f0->linearize(config, *config.orderingArbitrary());
// create expected
Ordering ord(*config.orderingArbitrary());
Vector b = Vector_(2, 0., -3.);
GaussianFactor expected(ord["x1"], eye(2), b, constraint);
JacobianFactor expected(ord["x1"], eye(2), b, constraint);
CHECK(assert_equal(expected, *actual));
}
@ -221,12 +221,12 @@ TEST( NonlinearFactor, linearize_constraint2 )
Values config;
config.insert(simulated2D::PoseKey(1), Point2(1.0, 2.0));
config.insert(simulated2D::PointKey(1), Point2(5.0, 4.0));
GaussianFactor::shared_ptr actual = f0.linearize(config, *config.orderingArbitrary());
JacobianFactor::shared_ptr actual = f0.linearize(config, *config.orderingArbitrary());
// create expected
Ordering ord(*config.orderingArbitrary());
Vector b = Vector_(2, -3., -3.);
GaussianFactor expected(ord["x1"], -1*eye(2), ord["l1"], eye(2), b, constraint);
JacobianFactor expected(ord["x1"], -1*eye(2), ord["l1"], eye(2), b, constraint);
CHECK(assert_equal(expected, *actual));
}

View File

@ -96,8 +96,8 @@ TEST( Graph, linearize )
{
Graph fg = createNonlinearFactorGraph();
Values initial = createNoisyValues();
boost::shared_ptr<GaussianFactorGraph> linearized = fg.linearize(initial, *initial.orderingArbitrary());
GaussianFactorGraph expected = createGaussianFactorGraph(*initial.orderingArbitrary());
boost::shared_ptr<FactorGraph<JacobianFactor> > linearized = fg.linearize(initial, *initial.orderingArbitrary());
FactorGraph<JacobianFactor> expected = createGaussianFactorGraph(*initial.orderingArbitrary());
CHECK(assert_equal(expected,*linearized)); // Needs correct linearizations
}

View File

@ -38,21 +38,26 @@ int main(int argc, char *argv[]) {
// Add a prior on the first pose
data.first->addPrior(0, Pose2(), sharedSigma(Pose2::Dim(), 0.0005));
tic_("Z 1 order");
tic_(1, "order");
Ordering::shared_ptr ordering(data.first->orderingCOLAMD(*data.second));
toc_("Z 1 order");
toc_(1, "order");
tictoc_print_();
tic_("Z 2 linearize");
GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering));
toc_("Z 2 linearize");
tic_(2, "linearize");
GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering)->dynamicCastFactors<GaussianFactorGraph>());
toc_(2, "linearize");
tictoc_print_();
for(size_t trial = 0; trial < 10; ++trial) {
tic_("Z 3 solve");
VectorValues soln(*GaussianMultifrontalSolver(*gfg).optimize());
toc_("Z 3 solve");
tic_(3, "solve");
tic(1, "construct solver");
GaussianMultifrontalSolver solver(*gfg);
toc(1, "construct solver");
tic(2, "optimize");
VectorValues soln(*solver.optimize());
toc(2, "optimize");
toc_(3, "solve");
tictoc_print_();
}
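
Editor's note: the timing macros also change in this commit: tic_/toc_ take a numeric position plus a short label instead of a single sort-key string, and tic/toc pairs nest inside an enclosing tic_/toc_ section. A minimal sketch of the new idiom (labels are illustrative):

    tic_(1, "order");                      // top-level section 1
    // ... compute the ordering ...
    toc_(1, "order");

    tic_(3, "solve");                      // top-level section 3
    tic(1, "construct solver");            //   nested step 1
    // ... build the solver ...
    toc(1, "construct solver");
    tic(2, "optimize");                    //   nested step 2
    // ... optimize ...
    toc(2, "optimize");
    toc_(3, "solve");

    tictoc_print_();                       // print accumulated timings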

View File

@ -38,21 +38,26 @@ int main(int argc, char *argv[]) {
// Add a prior on the first pose
data.first->addPrior(0, Pose2(), sharedSigma(Pose2::Dim(), 0.0005));
tic_("Z 1 order");
tic_(1, "order");
Ordering::shared_ptr ordering(data.first->orderingCOLAMD(*data.second));
toc_("Z 1 order");
toc_(1, "order");
tictoc_print_();
tic_("Z 2 linearize");
GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering));
toc_("Z 2 linearize");
tic_(2, "linearize");
GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering)->dynamicCastFactors<GaussianFactorGraph>());
toc_(2, "linearize");
tictoc_print_();
for(size_t trial = 0; trial < 10; ++trial) {
tic_("Z 3 solve");
VectorValues soln(*GaussianSequentialSolver(*gfg).optimize());
toc_("Z 3 solve");
tic_(3, "solve");
tic(1, "construct solver");
GaussianSequentialSolver solver(*gfg);
toc(1, "construct solver");
tic(2, "optimize");
VectorValues soln(*solver.optimize());
toc(2, "optimize");
toc_(3, "solve");
tictoc_print_();
}