diff --git a/gtsam/base/DenseQRUtil.cpp b/gtsam/base/DenseQRUtil.cpp index 32e26742a..1a086c3d2 100644 --- a/gtsam/base/DenseQRUtil.cpp +++ b/gtsam/base/DenseQRUtil.cpp @@ -155,12 +155,12 @@ namespace gtsam { assert(Stair != NULL); - tic("householder_denseqr"); + tic(1, "householder_denseqr"); int npiv = min(m,n); int b = min(min(m,n),32); double W[b*(n+b)]; DenseQR(m, n, npiv, A.data().begin(), Stair, W); - toc("householder_denseqr"); + toc(1, "householder_denseqr"); } } // namespace gtsam diff --git a/gtsam/base/Makefile.am b/gtsam/base/Makefile.am index eed25d600..1acc61b7d 100644 --- a/gtsam/base/Makefile.am +++ b/gtsam/base/Makefile.am @@ -37,7 +37,7 @@ sources += DSFVector.cpp check_PROGRAMS += tests/testBTree tests/testDSF tests/testDSFVector # Timing tests -noinst_PROGRAMS = tests/timeMatrix tests/timeublas tests/timeVirtual +noinst_PROGRAMS = tests/timeMatrix tests/timeublas tests/timeVirtual tests/timeTest #---------------------------------------------------------------------------------------------------- # Create a libtool library that is not installed diff --git a/gtsam/base/blockMatrices.h b/gtsam/base/blockMatrices.h index 445b88df2..c5e0de43f 100644 --- a/gtsam/base/blockMatrices.h +++ b/gtsam/base/blockMatrices.h @@ -72,6 +72,8 @@ public: const_iterator end() const { return base_.end(); } }; +template class SymmetricBlockView; + /** * This class stores a *reference* to a matrix and allows it to be accessed as * a collection of vertical blocks. It also provides for accessing individual @@ -81,7 +83,13 @@ public: * is consistent with the given block dimensions. * * This class also has three parameters that can be changed after construction - * that change the + * that change the apparent view of the matrix. firstBlock() determines the + * block that has index 0 for all operations (except for re-setting + * firstBlock()). rowStart() determines the apparent first row of the matrix + * for all operations (except for setting rowStart() and rowEnd()). rowEnd() + * determines the apparent *exclusive* last row for all operations. To include + * all rows, rowEnd() should be set to the number of rows in the matrix (i.e. + * one after the last true row index). */ template class VerticalBlockView { @@ -93,29 +101,58 @@ public: typedef BlockColumn constColumn; protected: - FullMatrix& matrix_; // the reference to the original matrix + FullMatrix& matrix_; // the reference to the full matrix std::vector variableColOffsets_; // the starting columns of each block (0-based) - // for elimination, represent + // Changes apparent matrix view, see main class comment. size_t rowStart_; // 0 initially size_t rowEnd_; // the number of row - 1, initially size_t blockStart_; // 0 initially public: /** Construct from an empty matrix (asserts that the matrix is empty) */ - VerticalBlockView(FullMatrix& matrix); + VerticalBlockView(FullMatrix& matrix) : + matrix_(matrix), rowStart_(0), rowEnd_(matrix_.size1()), blockStart_(0) { + fillOffsets((size_t*)0, (size_t*)0); + assertInvariants(); + } + + /** + * Construct from a non-empty matrix and copy the block structure from + * another block view. */ + template + VerticalBlockView(FullMatrix& matrix, const RHS& rhs) : + matrix_(matrix) { + if(matrix_.size1() != rhs.size1() || matrix_.size2() != rhs.size2()) + throw std::invalid_argument( + "In VerticalBlockView<>(FullMatrix& matrix, const RHS& rhs), matrix and rhs must\n" + "already be of the same size. 
If not, construct the VerticalBlockView from an\n" + "empty matrix and then use copyStructureFrom(const RHS& rhs) to resize the matrix\n" + "and set up the block structure."); + copyStructureFrom(rhs); + assertInvariants(); + } /** Construct from iterators over the sizes of each vertical block */ template - VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim); + VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim) : + matrix_(matrix), rowStart_(0), rowEnd_(matrix_.size1()), blockStart_(0) { + fillOffsets(firstBlockDim, lastBlockDim); + assertInvariants(); + } /** Construct from a vector of the sizes of each vertical block, resize the * matrix so that its height is matrixNewHeight and its width fits the given * block dimensions. */ template - VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim, size_t matrixNewHeight); - + VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim, size_t matrixNewHeight) : + matrix_(matrix), rowStart_(0), rowEnd_(matrixNewHeight), blockStart_(0) { + fillOffsets(firstBlockDim, lastBlockDim); + matrix_.resize(matrixNewHeight, variableColOffsets_.back(), false); + assertInvariants(); + } + /** Row size */ size_t size1() const { assertInvariants(); return rowEnd_ - rowStart_; } @@ -160,11 +197,19 @@ public: boost::numeric::ublas::range(variableColOffsets_[actualStartBlock], variableColOffsets_[actualEndBlock])); } + Block full() { + return range(0,nBlocks()); + } + + constBlock full() const { + return range(0,nBlocks()); + } + Column column(size_t block, size_t columnOffset) { assertInvariants(); size_t actualBlock = block + blockStart_; checkBlock(actualBlock); - assert(variableColOffsets_[actualBlock] + columnOffset < matrix_.size2()); + assert(variableColOffsets_[actualBlock] + columnOffset < variableColOffsets_[actualBlock+1]); Block blockMat(operator()(block)); return Column(blockMat, columnOffset); } @@ -193,24 +238,65 @@ public: size_t firstBlock() const { return blockStart_; } /** Copy the block structure and resize the underlying matrix, but do not - * copy the matrix data. + * copy the matrix data. If blockStart(), rowStart(), and/or rowEnd() have + * been modified, this copies the structure of the corresponding matrix view. + * In the destination VerticalBlockView, blockStart() and rowStart() will + * thus be 0, rowEnd() will be size2() of the source VerticalBlockView, and + * the underlying matrix will be the size of the view of the source matrix. */ - template - void copyStructureFrom(const VerticalBlockView& rhs); + template + void copyStructureFrom(const RHS& rhs) { + if(matrix_.size1() != rhs.size1() || matrix_.size2() != rhs.size2()) + matrix_.resize(rhs.size1(), rhs.size2(), false); + if(rhs.blockStart_ == 0) + variableColOffsets_ = rhs.variableColOffsets_; + else { + variableColOffsets_.resize(rhs.nBlocks() + 1); + variableColOffsets_[0] = 0; + size_t j=0; + assert(rhs.variableColOffsets_.begin()+rhs.blockStart_ < rhs.variableColOffsets_.end()-1); + for(std::vector::const_iterator off=rhs.variableColOffsets_.begin()+rhs.blockStart_; off!=rhs.variableColOffsets_.end()-1; ++off) { + variableColOffsets_[j+1] = variableColOffsets_[j] + (*(off+1) - *off); + ++ j; + } + } + rowStart_ = 0; + rowEnd_ = matrix_.size1(); + blockStart_ = 0; + assertInvariants(); + } /** Copy the block struture and matrix data, resizing the underlying matrix * in the process. 
This can deal with assigning between different types of * underlying matrices, as long as the matrices themselves are assignable. * To avoid creating a temporary matrix this assumes no aliasing, i.e. that * no part of the underlying matrices refer to the same memory! + * + * If blockStart(), rowStart(), and/or rowEnd() have been modified, this + * copies the structure of the corresponding matrix view. In the destination + * VerticalBlockView, blockStart() and rowStart() will thus be 0, rowEnd() + * will be size2() of the source VerticalBlockView, and the underlying matrix + * will be the size of the view of the source matrix. */ - template - VerticalBlockView& assignNoalias(const VerticalBlockView& rhs); + template + VerticalBlockView& assignNoalias(const RHS& rhs) { + copyStructureFrom(rhs); + boost::numeric::ublas::noalias(matrix_) = rhs.full(); + return *this; + } /** Swap the contents of the underlying matrix and the block information with * another VerticalBlockView. */ - void swap(VerticalBlockView& other); + void swap(VerticalBlockView& other) { + matrix_.swap(other.matrix_); + variableColOffsets_.swap(other.variableColOffsets_); + std::swap(rowStart_, other.rowStart_); + std::swap(rowEnd_, other.rowEnd_); + std::swap(blockStart_, other.blockStart_); + assertInvariants(); + other.assertInvariants(); + } protected: void assertInvariants() const { @@ -238,80 +324,254 @@ protected: } } - template - friend class VerticalBlockView; + template friend class SymmetricBlockView; + template friend class VerticalBlockView; }; -/* ************************************************************************* */ -template -VerticalBlockView::VerticalBlockView(FullMatrix& matrix) : -matrix_(matrix), rowStart_(0), rowEnd_(matrix_.size1()), blockStart_(0) { - fillOffsets((size_t*)0, (size_t*)0); - assertInvariants(); -} -/* ************************************************************************* */ +/** + * This class stores a *reference* to a matrix and allows it to be accessed as + * a collection of blocks. It also provides for accessing individual + * columns from those blocks. When constructed or resized, the caller must + * provide the dimensions of the blocks, as well as an underlying matrix + * storage object. This class will resize the underlying matrix such that it + * is consistent with the given block dimensions. + * + * This class uses a symmetric block structure. The underlying matrix does not + * necessarily need to be symmetric. + * + * This class also has a parameter that can be changed after construction to + * change the apparent matrix view. firstBlock() determines the block that + * appears to have index 0 for all operations (except re-setting firstBlock()). 
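+ *
+ * A minimal usage sketch (illustrative only, not part of the original
+ * change; the matrix H and the block sizes are made up):
+ *
+ *   MatrixColMajor H(5,5);
+ *   size_t dims[] = { 2, 3 };
+ *   SymmetricBlockView<MatrixColMajor> Hview(H, dims, dims+2);
+ *   Hview(0,1);              // 2x3 view of the upper-right block
+ *   Hview.blockStart() = 1;  // Hview(0,0) now refers to the original block (1,1)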
+ */ template -template -VerticalBlockView::VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim) : -matrix_(matrix), rowStart_(0), rowEnd_(matrix_.size1()), blockStart_(0) { - fillOffsets(firstBlockDim, lastBlockDim); - assertInvariants(); -} +class SymmetricBlockView { +public: + typedef MATRIX FullMatrix; + typedef typename boost::numeric::ublas::matrix_range Block; + typedef typename boost::numeric::ublas::matrix_range constBlock; + typedef BlockColumn Column; + typedef BlockColumn constColumn; -/* ************************************************************************* */ -template -template -VerticalBlockView::VerticalBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim, size_t matrixNewHeight) : -matrix_(matrix), rowStart_(0), rowEnd_(matrixNewHeight), blockStart_(0) { - fillOffsets(firstBlockDim, lastBlockDim); - matrix_.resize(matrixNewHeight, variableColOffsets_.back(), false); - assertInvariants(); -} +protected: + FullMatrix& matrix_; // the reference to the full matrix + std::vector variableColOffsets_; // the starting columns of each block (0-based) -/* ************************************************************************* */ -template -template -void VerticalBlockView::copyStructureFrom(const VerticalBlockView& rhs) { - matrix_.resize(rhs.rowEnd() - rhs.rowStart(), rhs.range(0, rhs.nBlocks()).size2(), false); - if(rhs.blockStart_ == 0) - variableColOffsets_ = rhs.variableColOffsets_; - else { - variableColOffsets_.resize(rhs.nBlocks() + 1); + // Changes apparent matrix view, see main class comment. + size_t blockStart_; // 0 initially + +public: + /** Construct from an empty matrix (asserts that the matrix is empty) */ + SymmetricBlockView(FullMatrix& matrix) : + matrix_(matrix), blockStart_(0) { + fillOffsets((size_t*)0, (size_t*)0); + assertInvariants(); + } + + /** Construct from iterators over the sizes of each block */ + template + SymmetricBlockView(FullMatrix& matrix, ITERATOR firstBlockDim, ITERATOR lastBlockDim) : + matrix_(matrix), blockStart_(0) { + fillOffsets(firstBlockDim, lastBlockDim); + assertInvariants(); + } + + /** + * Modify the size and structure of the underlying matrix and this block + * view. If 'preserve' is true, the underlying matrix data will be copied if + * the matrix size changes, otherwise the new data will be uninitialized. 
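+   *
+   * For example (an illustrative sketch, not from the original change),
+   * resizing to blocks of sizes 2 and 3 gives a 5x5 underlying matrix:
+   *
+   *   size_t dims[] = { 2, 3 };
+   *   view.resize(dims, dims+2, false);  // 'view' is some SymmetricBlockView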
+ */ + template + void resize(ITERATOR firstBlockDim, ITERATOR lastBlockDim, bool preserve) { + blockStart_ = 0; + fillOffsets(firstBlockDim, lastBlockDim); + matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back(), preserve); + } + + /** Row size + */ + size_t size1() const { assertInvariants(); return variableColOffsets_.back() - variableColOffsets_[blockStart_]; } + + /** Column size + */ + size_t size2() const { return size1(); } + + + /** Block count + */ + size_t nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; } + + + Block operator()(size_t i_block, size_t j_block) { + return range(i_block, i_block+1, j_block, j_block+1); + } + + constBlock operator()(size_t i_block, size_t j_block) const { + return range(i_block, i_block+1, j_block, j_block+1); + } + + Block range(size_t i_startBlock, size_t i_endBlock, size_t j_startBlock, size_t j_endBlock) { + assertInvariants(); + size_t i_actualStartBlock = i_startBlock + blockStart_; + size_t i_actualEndBlock = i_endBlock + blockStart_; + size_t j_actualStartBlock = j_startBlock + blockStart_; + size_t j_actualEndBlock = j_endBlock + blockStart_; + checkBlock(i_actualStartBlock); + checkBlock(j_actualStartBlock); + assert(i_actualEndBlock < variableColOffsets_.size()); + assert(j_actualEndBlock < variableColOffsets_.size()); + return Block(matrix_, + boost::numeric::ublas::range(variableColOffsets_[i_actualStartBlock], variableColOffsets_[i_actualEndBlock]), + boost::numeric::ublas::range(variableColOffsets_[j_actualStartBlock], variableColOffsets_[j_actualEndBlock])); + } + + constBlock range(size_t i_startBlock, size_t i_endBlock, size_t j_startBlock, size_t j_endBlock) const { + assertInvariants(); + size_t i_actualStartBlock = i_startBlock + blockStart_; + size_t i_actualEndBlock = i_endBlock + blockStart_; + size_t j_actualStartBlock = j_startBlock + blockStart_; + size_t j_actualEndBlock = j_endBlock + blockStart_; + checkBlock(i_actualStartBlock); + checkBlock(j_actualStartBlock); + assert(i_actualEndBlock < variableColOffsets_.size()); + assert(j_actualEndBlock < variableColOffsets_.size()); + return constBlock(matrix_, + boost::numeric::ublas::range(variableColOffsets_[i_actualStartBlock], variableColOffsets_[i_actualEndBlock]), + boost::numeric::ublas::range(variableColOffsets_[j_actualStartBlock], variableColOffsets_[j_actualEndBlock])); + } + + Block full() { + return range(0,nBlocks(), 0,nBlocks()); + } + + constBlock full() const { + return range(0,nBlocks(), 0,nBlocks()); + } + + Column column(size_t i_block, size_t j_block, size_t columnOffset) { + assertInvariants(); + size_t j_actualBlock = j_block + blockStart_; + assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]); + Block blockMat(operator()(i_block, j_block)); + return Column(blockMat, columnOffset); + } + + constColumn column(size_t i_block, size_t j_block, size_t columnOffset) const { + assertInvariants(); + size_t j_actualBlock = j_block + blockStart_; + assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]); + constBlock blockMat(operator()(i_block, j_block)); + return constColumn(blockMat, columnOffset); + } + + Column rangeColumn(size_t i_startBlock, size_t i_endBlock, size_t j_block, size_t columnOffset) { + assertInvariants(); + size_t j_actualBlock = j_block + blockStart_; + assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]); + Block blockMat(this->range(i_startBlock, 
i_endBlock, j_block, j_block+1));
+    return Column(blockMat, columnOffset);
+  }
+
+  constColumn rangeColumn(size_t i_startBlock, size_t i_endBlock, size_t j_block, size_t columnOffset) const {
+    assertInvariants();
+    size_t j_actualBlock = j_block + blockStart_;
+    assert(variableColOffsets_[j_actualBlock] + columnOffset < variableColOffsets_[j_actualBlock+1]);
+    constBlock blockMat(this->range(i_startBlock, i_endBlock, j_block, j_block+1));
+    return constColumn(blockMat, columnOffset);
+  }
+
+  size_t offset(size_t block) const {
+    assertInvariants();
+    size_t actualBlock = block + blockStart_;
+    checkBlock(actualBlock);
+    return variableColOffsets_[actualBlock] - variableColOffsets_[blockStart_];
+  }
+
+  size_t& blockStart() { return blockStart_; }
+  size_t blockStart() const { return blockStart_; }
+
+  /** Copy the block structure and resize the underlying matrix, but do not
+   * copy the matrix data.  If blockStart() has been modified, this copies the
+   * structure of the corresponding matrix view.  In the destination
+   * SymmetricBlockView, blockStart() will thus be 0 and the underlying matrix
+   * will be the size of the view of the source matrix.
+   */
+  template<class RHS>
+  void copyStructureFrom(const RHS& rhs) {
+    matrix_.resize(rhs.size2(), rhs.size2(), false);
+    if(rhs.blockStart_ == 0)
+      variableColOffsets_ = rhs.variableColOffsets_;
+    else {
+      variableColOffsets_.resize(rhs.nBlocks() + 1);
+      variableColOffsets_[0] = 0;
+      size_t j=0;
+      assert(rhs.variableColOffsets_.begin()+rhs.blockStart_ < rhs.variableColOffsets_.end()-1);
+      for(std::vector<size_t>::const_iterator off=rhs.variableColOffsets_.begin()+rhs.blockStart_; off!=rhs.variableColOffsets_.end()-1; ++off) {
+        variableColOffsets_[j+1] = variableColOffsets_[j] + (*(off+1) - *off);
+        ++ j;
+      }
+    }
+    blockStart_ = 0;
+    assertInvariants();
+  }
+
+  /** Copy the block structure and matrix data, resizing the underlying matrix
+   * in the process.  This can deal with assigning between different types of
+   * underlying matrices, as long as the matrices themselves are assignable.
+   * To avoid creating a temporary matrix this assumes no aliasing, i.e. that
+   * no part of the underlying matrices refer to the same memory!
+   *
+   * If blockStart() has been modified, this copies the structure of the
+   * corresponding matrix view.  In the destination SymmetricBlockView,
+   * blockStart() will thus be 0 and the underlying matrix will be the size
+   * of the view of the source matrix.
+   */
+  template<class RHSMATRIX>
+  SymmetricBlockView& assignNoalias(const SymmetricBlockView<RHSMATRIX>& rhs) {
+    copyStructureFrom(rhs);
+    boost::numeric::ublas::noalias(matrix_) = rhs.range(0, rhs.nBlocks(), 0, rhs.nBlocks());
+    return *this;
+  }
+
+  /** Swap the contents of the underlying matrix and the block information with
+   * another SymmetricBlockView.
+   */
+  void swap(SymmetricBlockView& other) {
+    matrix_.swap(other.matrix_);
+    variableColOffsets_.swap(other.variableColOffsets_);
+    std::swap(blockStart_, other.blockStart_);
+    assertInvariants();
+    other.assertInvariants();
+  }
+
+protected:
+  void assertInvariants() const {
+    assert(matrix_.size1() == matrix_.size2());
+    assert(matrix_.size2() == variableColOffsets_.back());
+    assert(blockStart_ < variableColOffsets_.size());
+  }
+
+  void checkBlock(size_t block) const {
+    assert(matrix_.size1() == matrix_.size2());
+    assert(matrix_.size2() == variableColOffsets_.back());
+    assert(block < variableColOffsets_.size()-1);
+    assert(variableColOffsets_[block] < matrix_.size2() && variableColOffsets_[block+1] <= matrix_.size2());
+  }
+
+  template<typename ITERATOR>
+  void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim) {
+    variableColOffsets_.resize((lastBlockDim-firstBlockDim)+1);
     variableColOffsets_[0] = 0;
     size_t j=0;
-    assert(rhs.variableColOffsets_.begin()+rhs.blockStart_ < rhs.variableColOffsets_.end()-1);
-    for(std::vector<size_t>::const_iterator off=rhs.variableColOffsets_.begin()+rhs.blockStart_; off!=rhs.variableColOffsets_.end()-1; ++off) {
-      variableColOffsets_[j+1] = variableColOffsets_[j] + (*(off+1) - *off);
+    for(ITERATOR dim=firstBlockDim; dim!=lastBlockDim; ++dim) {
+      variableColOffsets_[j+1] = variableColOffsets_[j] + *dim;
       ++ j;
     }
   }
-  rowStart_ = 0;
-  rowEnd_ = matrix_.size1();
-  blockStart_ = 0;
-  assertInvariants();
-}
-/* ************************************************************************* */
-template<class MATRIX>
-template<class RHSMATRIX>
-VerticalBlockView<MATRIX>& VerticalBlockView<MATRIX>::assignNoalias(const VerticalBlockView<RHSMATRIX>& rhs) {
-  copyStructureFrom(rhs);
-  boost::numeric::ublas::noalias(matrix_) = rhs.range(0, rhs.nBlocks());
-  return *this;
-}
-
-/* ************************************************************************* */
-template<class MATRIX>
-void VerticalBlockView<MATRIX>::swap(VerticalBlockView<MATRIX>& other) {
-  matrix_.swap(other.matrix_);
-  variableColOffsets_.swap(other.variableColOffsets_);
-  std::swap(rowStart_, other.rowStart_);
-  std::swap(rowEnd_, other.rowEnd_);
-  std::swap(blockStart_, other.blockStart_);
-  assertInvariants();
-  other.assertInvariants();
-}
+  template<class OTHER> friend class SymmetricBlockView;
+  template<class OTHER> friend class VerticalBlockView;
+};
 
 }
diff --git a/gtsam/base/cholesky.cpp b/gtsam/base/cholesky.cpp
index 9d818b1d4..de2c1488d 100644
--- a/gtsam/base/cholesky.cpp
+++ b/gtsam/base/cholesky.cpp
@@ -18,6 +18,7 @@
 #include
 #include
+#include
 #include
 #include
 
@@ -225,7 +226,50 @@ size_t choleskyCareful(MatrixColMajor& ATA) {
   }
   return maxrank;
+}
+/* ************************************************************************* */
+void choleskyPartial(MatrixColMajor& ABC, size_t nFrontal) {
+
+  static const bool debug = false;
+
+  assert(ABC.size1() == ABC.size2());
+  assert(nFrontal <= ABC.size1());
+
+  const size_t n = ABC.size1();
+
+  // Compute Cholesky factorization of A, overwrites A.
+  tic(1, "dpotrf");
+  int info = lapack_dpotrf('U', nFrontal, &ABC(0,0), n);
+  if(info != 0) {
+    if(info < 0)
+      throw std::domain_error(boost::str(boost::format(
+          "Bad input to choleskyPartial, dpotrf returned %d.\n")%info));
+    else
+      throw std::domain_error(boost::str(boost::format(
+          "The matrix passed into choleskyPartial is numerically rank-deficient, dpotrf returned rank=%d, expected rank was %d.\n")%(info-1)%nFrontal));
+  }
+  toc(1, "dpotrf");
+
+  // Views of R, S, and L submatrices.
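+  // (After dpotrf, the upper triangle of the leading nFrontal x nFrontal
+  // block holds R with A = R'R.  The dtrsm below overwrites B with
+  // S = inv(R') * B, and the dsyrk accumulates the Schur complement
+  // L = C - S'*S into the upper triangle of C.)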
+  ublas::matrix_range<MatrixColMajor> R(ublas::project(ABC, ublas::range(0,nFrontal), ublas::range(0,nFrontal)));
+  ublas::matrix_range<MatrixColMajor> S(ublas::project(ABC, ublas::range(0,nFrontal), ublas::range(nFrontal,n)));
+  ublas::matrix_range<MatrixColMajor> L(ublas::project(ABC, ublas::range(nFrontal,n), ublas::range(nFrontal,n)));
+
+  // Compute S = inv(R') * B
+  tic(2, "compute S");
+  if(S.size2() > 0)
+    cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, S.size1(), S.size2(), 1.0, &R(0,0), n, &S(0,0), n);
+  if(debug) gtsam::print(S, "S: ");
+  toc(2, "compute S");
+
+  // Compute L = C - S' * S
+  tic(3, "compute L");
+  if(debug) gtsam::print(L, "C: ");
+  if(L.size2() > 0)
+    cblas_dsyrk(CblasColMajor, CblasUpper, CblasTrans, L.size1(), S.size1(), -1.0, &S(0,0), n, 1.0, &L(0,0), n);
+  if(debug) gtsam::print(L, "L: ");
+  toc(3, "compute L");
 }
 
 }
diff --git a/gtsam/base/cholesky.h b/gtsam/base/cholesky.h
index c7c815462..90396b122 100644
--- a/gtsam/base/cholesky.h
+++ b/gtsam/base/cholesky.h
@@ -60,5 +60,15 @@ size_t choleskyFactorUnderdetermined(MatrixColMajor& Ab, size_t nFrontal);
  */
 size_t choleskyCareful(MatrixColMajor& ATA);
 
+/**
+ * Partial Cholesky computes a factor [R S  such that [R' 0  [R S  = [A  B
+ *                                     0 L]            S' I]  0 L]    B' C].
+ * The input to this function is the matrix ABC = [A  B], and the parameter
+ *                                                [B' C]
+ * nFrontal determines the split between A, B, and C, with A being of size
+ * nFrontal x nFrontal.
+ */
+void choleskyPartial(MatrixColMajor& ABC, size_t nFrontal);
+
 }
diff --git a/gtsam/base/tests/testCholesky.cpp b/gtsam/base/tests/testCholesky.cpp
index 2c84becc7..342716083 100644
--- a/gtsam/base/tests/testCholesky.cpp
+++ b/gtsam/base/tests/testCholesky.cpp
@@ -20,6 +20,8 @@
 #include
 #include
+#include
+#include
 
 using namespace gtsam;
 namespace ublas = boost::numeric::ublas;
@@ -66,6 +68,37 @@ TEST(cholesky, choleskyFactorUnderdetermined) {
   CHECK(assert_equal(expected, actual, 1e-3));
 }
 
+/* ************************************************************************* */
+TEST(cholesky, choleskyPartial) {
+
+  // choleskyPartial should only use the upper triangle, so this represents a
+  // symmetric matrix.
+  Matrix ABC = Matrix_(7,7,
+      4.0375,   3.4584,   3.5735,   2.4815,   2.1471,   2.7400,   2.2063,
+          0.,   4.7267,   3.8423,   2.3624,   2.8091,   2.9579,   2.5914,
+          0.,       0.,   5.1600,   2.0797,   3.4690,   3.2419,   2.9992,
+          0.,       0.,       0.,   1.8786,   1.0535,   1.4250,   1.3347,
+          0.,       0.,       0.,       0.,   3.0788,   2.6283,   2.3791,
+          0.,       0.,       0.,       0.,       0.,   2.9227,   2.4056,
+          0.,       0.,       0.,       0.,       0.,       0.,   2.5776);
+
+  // Do partial Cholesky on 3 frontal scalar variables
+  MatrixColMajor RSL(ABC);
+  choleskyPartial(RSL, 3);
+
+  // See the function comment for choleskyPartial, this decomposition should hold.
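+  // R1 is assembled as [R' 0; S' I] and R2 as [R S; 0 L] (with L
+  // symmetrized), so their product should reproduce the original
+  // symmetric matrix [A B; B' C].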
+ Matrix R1(ublas::trans(RSL)); + Matrix R2(RSL); + ublas::project(R1, ublas::range(3,7), ublas::range(3,7)) = eye(4,4); + ublas::project(R2, ublas::range(3,7), ublas::range(3,7)) = + ublas::symmetric_adaptor,ublas::upper>( + ublas::project(R2, ublas::range(3,7), ublas::range(3,7))); + Matrix actual(R1 * R2); + + EXPECT(assert_equal(ublas::symmetric_adaptor(ABC), actual, 1e-9)); + +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/base/tests/timeTest.cpp b/gtsam/base/tests/timeTest.cpp new file mode 100644 index 000000000..e50121c29 --- /dev/null +++ b/gtsam/base/tests/timeTest.cpp @@ -0,0 +1,62 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file timing.h + * @brief + * @author Richard Roberts (extracted from Michael Kaess' timing functions) + * @created Oct 5, 2010 + */ + +#include + +int main(int argc, char *argv[]) { + + ticPush_("1", "top 1"); + ticPush_("1", "sub 1"); + tic_("sub sub a"); + toc_("sub sub a"); + ticPop_("1", "sub 1"); + ticPush_("2", "sub 2"); + tic_("sub sub b"); + toc_("sub sub b"); + ticPop_("2", "sub 2"); + ticPop_("1", "top 1"); + + ticPush_("2", "top 2"); + ticPush_("1", "sub 1"); + tic_("sub sub a"); + toc_("sub sub a"); + ticPop_("1", "sub 1"); + ticPush_("2", "sub 2"); + tic_("sub sub b"); + toc_("sub sub b"); + ticPop_("2", "sub 2"); + ticPop_("2", "top 2"); + + for(size_t i=0; i<1000000; ++i) { + ticPush_("3", "overhead"); + ticPush_("1", "overhead"); + ticPop_("1", "overhead"); + ticPop_("3", "overhead"); + } + + for(size_t i=0; i<1000000; ++i) { + tic(1, "overhead a"); + tic(1, "overhead b"); + toc(1, "overhead b"); + toc(1, "overhead a"); + } + + tictoc_print_(); + + return 0; +} diff --git a/gtsam/inference/EliminationTree-inl.h b/gtsam/inference/EliminationTree-inl.h index 2b79ee1b8..275b8d26f 100644 --- a/gtsam/inference/EliminationTree-inl.h +++ b/gtsam/inference/EliminationTree-inl.h @@ -7,6 +7,7 @@ #pragma once #include +#include #include #include #include @@ -30,7 +31,7 @@ EliminationTree::eliminate_(Conditionals& conditionals) const { if(debug) cout << "ETree: eliminating " << this->key_ << endl; - set, boost::fast_pool_allocator > separator; + FastSet separator; // Create the list of factors to be eliminated, initially empty, and reserve space FactorGraph factors; @@ -92,12 +93,10 @@ EliminationTree::Create(const FactorGraph& factorGraph, c static const bool debug = false; - tic("ET 1: Create"); - - tic("ET 1.1: ComputeParents"); + tic(1, "ET ComputeParents"); // Compute the tree structure vector parents(ComputeParents(structure)); - toc("ET 1.1: ComputeParents"); + toc(1, "ET ComputeParents"); // Number of variables const size_t n = structure.size(); @@ -105,7 +104,7 @@ EliminationTree::Create(const FactorGraph& factorGraph, c static const Index none = numeric_limits::max(); // Create tree structure - tic("ET 1.2: assemble tree"); + tic(2, "assemble tree"); vector trees(n); for (Index k = 1; k <= n; k++) { Index j = n - k; @@ -113,10 +112,10 @@ EliminationTree::Create(const FactorGraph& factorGraph, c if (parents[j] != none) trees[parents[j]]->add(trees[j]); } - toc("ET 1.2: assemble tree"); + toc(2, 
"assemble tree"); // Hang factors in right places - tic("ET 1.3: hang factors"); + tic(3, "hang factors"); BOOST_FOREACH(const typename DERIVEDFACTOR::shared_ptr& derivedFactor, factorGraph) { // Here we static_cast to the factor type of this EliminationTree. This // allows performing symbolic elimination on, for example, GaussianFactors. @@ -124,9 +123,7 @@ EliminationTree::Create(const FactorGraph& factorGraph, c Index j = factor->front(); trees[j]->add(factor); } - toc("ET 1.3: hang factors"); - - toc("ET 1: Create"); + toc(3, "hang factors"); // Assert that all other nodes have parents, i.e. that this is not a forest. #ifndef NDEBUG @@ -147,9 +144,9 @@ typename EliminationTree::shared_ptr EliminationTree::Create(const FactorGraph& factorGraph) { // Build variable index - tic("ET 0: variable index"); + tic(0, "ET Create, variable index"); const VariableIndex variableIndex(factorGraph); - toc("ET 0: variable index"); + toc(0, "ET Create, variable index"); // Build elimination tree return Create(factorGraph, variableIndex); @@ -185,24 +182,20 @@ template typename EliminationTree::BayesNet::shared_ptr EliminationTree::eliminate() const { - tic("ET 2: eliminate"); - // call recursive routine - tic("ET 2.1: recursive eliminate"); + tic(1, "ET recursive eliminate"); Conditionals conditionals(this->key_ + 1); (void)eliminate_(conditionals); - toc("ET 2.1: recursive eliminate"); + toc(1, "ET recursive eliminate"); // Add conditionals to BayesNet - tic("ET 2.1: assemble BayesNet"); + tic(2, "assemble BayesNet"); typename BayesNet::shared_ptr bayesNet(new BayesNet); BOOST_FOREACH(const typename BayesNet::sharedConditional& conditional, conditionals) { if(conditional) bayesNet->push_back(conditional); } - toc("ET 2.1: assemble BayesNet"); - - toc("ET 2: eliminate"); + toc(2, "assemble BayesNet"); return bayesNet; } diff --git a/gtsam/inference/FactorBase.h b/gtsam/inference/FactorBase.h index 4f8bda92b..9f25d0757 100644 --- a/gtsam/inference/FactorBase.h +++ b/gtsam/inference/FactorBase.h @@ -101,7 +101,7 @@ public: keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; assertInvariants(); } /** Construct n-way factor */ - FactorBase(std::set keys) { + FactorBase(const std::set& keys) { BOOST_FOREACH(const Key& key, keys) keys_.push_back(key); assertInvariants(); } @@ -124,7 +124,7 @@ public: typename BayesNet::shared_ptr eliminate(size_t nrFrontals = 1); /** - * Permutes the GaussianFactor, but for efficiency requires the permutation + * Permutes the factor, but for efficiency requires the permutation * to already be inverted. 
*/ void permuteWithInverse(const Permutation& inversePermutation); diff --git a/gtsam/inference/FactorGraph.h b/gtsam/inference/FactorGraph.h index 81cf1ff16..9ddc80462 100644 --- a/gtsam/inference/FactorGraph.h +++ b/gtsam/inference/FactorGraph.h @@ -85,7 +85,7 @@ namespace gtsam { void print(const std::string& s = "FactorGraph") const; /** Check equality */ - bool equals(const FactorGraph& fg, double tol = 1e-9) const; + bool equals(const FactorGraph& fg, double tol = 1e-9) const; /** const cast to the underlying vector of factors */ operator const std::vector&() const { return factors_; } @@ -110,6 +110,39 @@ namespace gtsam { /** return the number valid factors */ size_t nrFactors() const; + /** dynamic_cast the factor pointers down or up the class hierarchy */ + template + typename RELATED::shared_ptr dynamicCastFactors() const { + typename RELATED::shared_ptr ret(new RELATED); + ret->reserve(this->size()); + BOOST_FOREACH(const sharedFactor& factor, *this) { + typename RELATED::Factor::shared_ptr castedFactor(boost::dynamic_pointer_cast(factor)); + if(castedFactor) + ret->push_back(castedFactor); + else + throw std::invalid_argument("In FactorGraph::dynamic_factor_cast(), dynamic_cast failed, meaning an invalid cast was requested."); + } + return ret; + } + + /** + * dynamic_cast factor pointers if possible, otherwise convert with a + * constructor of the target type. + */ + template + typename TARGET::shared_ptr convertCastFactors() const { + typename TARGET::shared_ptr ret(new TARGET); + ret->reserve(this->size()); + BOOST_FOREACH(const sharedFactor& factor, *this) { + typename TARGET::Factor::shared_ptr castedFactor(boost::dynamic_pointer_cast(factor)); + if(castedFactor) + ret->push_back(castedFactor); + else + ret->push_back(typename TARGET::Factor::shared_ptr(new typename TARGET::Factor(*factor))); + } + return ret; + } + /** ----------------- Modifying Factor Graphs ---------------------------- */ /** STL begin and end, so we can use BOOST_FOREACH */ @@ -179,7 +212,7 @@ namespace gtsam { FactorGraph::FactorGraph(const BayesNet& bayesNet) { factors_.reserve(bayesNet.size()); BOOST_FOREACH(const typename CONDITIONAL::shared_ptr& cond, bayesNet) { - this->push_back(sharedFactor(new FACTOR(*cond))); + this->push_back(cond->toFactor()); } } diff --git a/gtsam/inference/GenericSequentialSolver-inl.h b/gtsam/inference/GenericSequentialSolver-inl.h index 3c2547ead..0d26d6a6b 100644 --- a/gtsam/inference/GenericSequentialSolver-inl.h +++ b/gtsam/inference/GenericSequentialSolver-inl.h @@ -87,7 +87,7 @@ typename FactorGraph::shared_ptr GenericSequentialSolver::jointF joint->reserve(js.size()); typename BayesNet::const_reverse_iterator conditional = bayesNet->rbegin(); for(size_t i = 0; i < js.size(); ++i) { - joint->push_back(typename FACTOR::shared_ptr(new FACTOR(**(conditional++)))); } + joint->push_back((*(conditional++))->toFactor()); } // Undo the permutation on the eliminated joint marginal factors BOOST_FOREACH(const typename FACTOR::shared_ptr& factor, *joint) { diff --git a/gtsam/inference/IndexConditional.h b/gtsam/inference/IndexConditional.h index 2955efa64..f70013943 100644 --- a/gtsam/inference/IndexConditional.h +++ b/gtsam/inference/IndexConditional.h @@ -23,8 +23,6 @@ namespace gtsam { -class IndexFactor; - class IndexConditional : public ConditionalBase { public: @@ -62,6 +60,9 @@ public: static shared_ptr FromRange(ITERATOR firstKey, ITERATOR lastKey, size_t nrFrontals) { return Base::FromRange(firstKey, lastKey, nrFrontals); } + /** Convert to a factor */ + 
IndexFactor::shared_ptr toFactor() const { return IndexFactor::shared_ptr(new IndexFactor(*this)); } + }; } diff --git a/gtsam/inference/IndexFactor.cpp b/gtsam/inference/IndexFactor.cpp index 999b82627..bd5052345 100644 --- a/gtsam/inference/IndexFactor.cpp +++ b/gtsam/inference/IndexFactor.cpp @@ -18,6 +18,7 @@ #include #include +#include #include using namespace std; diff --git a/gtsam/inference/IndexFactor.h b/gtsam/inference/IndexFactor.h index a0dd4c6bb..dee284fb4 100644 --- a/gtsam/inference/IndexFactor.h +++ b/gtsam/inference/IndexFactor.h @@ -18,7 +18,6 @@ #pragma once -#include #include namespace gtsam { @@ -60,7 +59,7 @@ public: IndexFactor(Index j1, Index j2, Index j3, Index j4) : Base(j1, j2, j3, j4) {} /** Construct n-way factor */ - IndexFactor(std::set js) : Base(js) {} + IndexFactor(const std::set& js) : Base(js) {} /** * Combine and eliminate several factors. diff --git a/gtsam/inference/JunctionTree-inl.h b/gtsam/inference/JunctionTree-inl.h index b4c61dfa1..381d79ee3 100644 --- a/gtsam/inference/JunctionTree-inl.h +++ b/gtsam/inference/JunctionTree-inl.h @@ -39,19 +39,20 @@ namespace gtsam { /* ************************************************************************* */ template void JunctionTree::construct(const FG& fg, const VariableIndex& variableIndex) { - tic("JT 1 constructor"); - tic("JT 1.1 symbolic elimination"); - SymbolicBayesNet::shared_ptr sbn = EliminationTree::Create(fg, variableIndex)->eliminate(); - toc("JT 1.1 symbolic elimination"); - tic("JT 1.2 symbolic BayesTree"); + tic(1, "JT symbolic ET"); + const typename EliminationTree::shared_ptr symETree(EliminationTree::Create(fg, variableIndex)); + toc(1, "JT symbolic ET"); + tic(2, "JT symbolic eliminate"); + SymbolicBayesNet::shared_ptr sbn = symETree->eliminate(); + toc(2, "JT symbolic eliminate"); + tic(3, "symbolic BayesTree"); SymbolicBayesTree sbt(*sbn); - toc("JT 1.2 symbolic BayesTree"); + toc(3, "symbolic BayesTree"); // distribute factors - tic("JT 1.3 distributeFactors"); + tic(4, "distributeFactors"); this->root_ = distributeFactors(fg, sbt.root()); - toc("JT 1.3 distributeFactors"); - toc("JT 1 constructor"); + toc(4, "distributeFactors"); } /* ************************************************************************* */ @@ -95,7 +96,7 @@ namespace gtsam { // Now add each factor to the list corresponding to its lowest-ordered // variable. - vector > > targets(maxVar+1); + vector > targets(maxVar+1); for(size_t i=0; i::max()) targets[lowestOrdered[i]].push_back(i); @@ -107,7 +108,7 @@ namespace gtsam { /* ************************************************************************* */ template typename JunctionTree::sharedClique JunctionTree::distributeFactors(const FG& fg, - const std::vector > >& targets, + const std::vector >& targets, const SymbolicBayesTree::sharedClique& bayesClique) { if(bayesClique) { @@ -165,22 +166,23 @@ namespace gtsam { // Now that we know which factors and variables, and where variables // come from and go to, create and eliminate the new joint factor. 
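+    // CombineAndEliminate returns the conditionals on the frontal variables
+    // together with the remaining factor on the separator.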
- tic("JT 2.2 CombineAndEliminate"); + tic(2, "CombineAndEliminate"); pair::shared_ptr, typename FG::sharedFactor> eliminated( FG::Factor::CombineAndEliminate(fg, current->frontal.size())); - toc("JT 2.2 CombineAndEliminate"); + toc(2, "CombineAndEliminate"); assert(std::equal(eliminated.second->begin(), eliminated.second->end(), current->separator.begin())); - tic("JT 2.4 Update tree"); + tic(3, "Update tree"); // create a new clique corresponding the combined factors typename BayesTree::sharedClique new_clique(new typename BayesTree::Clique(*eliminated.first)); new_clique->children_ = children; - BOOST_FOREACH(typename BayesTree::sharedClique& childRoot, children) - childRoot->parent_ = new_clique; + BOOST_FOREACH(typename BayesTree::sharedClique& childRoot, children) { + childRoot->parent_ = new_clique; + } + toc(3, "Update tree"); - toc("JT 2.4 Update tree"); return make_pair(new_clique, eliminated.second); } @@ -188,11 +190,9 @@ namespace gtsam { template typename JunctionTree::BayesTree::sharedClique JunctionTree::eliminate() const { if(this->root()) { - tic("JT 2 eliminate"); pair ret = this->eliminateOneClique(this->root()); if (ret.second->size() != 0) throw runtime_error("JuntionTree::eliminate: elimination failed because of factors left over!"); - toc("JT 2 eliminate"); return ret.first; } else return typename BayesTree::sharedClique(); diff --git a/gtsam/inference/JunctionTree.h b/gtsam/inference/JunctionTree.h index df6fb6e6e..f9311bab5 100644 --- a/gtsam/inference/JunctionTree.h +++ b/gtsam/inference/JunctionTree.h @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -59,7 +59,7 @@ namespace gtsam { const SymbolicBayesTree::sharedClique& clique); // distribute the factors along the cluster tree - sharedClique distributeFactors(const FG& fg, const std::vector > >& targets, + sharedClique distributeFactors(const FG& fg, const std::vector >& targets, const SymbolicBayesTree::sharedClique& clique); // recursive elimination function diff --git a/gtsam/inference/tests/testFactorGraph.cpp b/gtsam/inference/tests/testFactorGraph.cpp index 452c5cd53..46484a426 100644 --- a/gtsam/inference/tests/testFactorGraph.cpp +++ b/gtsam/inference/tests/testFactorGraph.cpp @@ -27,6 +27,7 @@ using namespace boost::assign; #define GTSAM_MAGIC_KEY #include +#include using namespace std; using namespace gtsam; @@ -92,6 +93,11 @@ typedef boost::shared_ptr shared; // CHECK(singletonGraph_excepted.equals(singletonGraph)); //} +TEST(FactorGraph, dynamic_factor_cast) { + FactorGraph fg; + fg.dynamicCastFactors >(); +} + /* ************************************************************************* */ int main() { diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index cbff35bf2..8f55e9bcf 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -22,6 +22,8 @@ #include #include +#include +#include #include using namespace std; @@ -139,7 +141,10 @@ bool GaussianConditional::equals(const GaussianConditional &c, double tol) const return true; } - +/* ************************************************************************* */ +GaussianFactor::shared_ptr GaussianConditional::toFactor() const { + return GaussianFactor::shared_ptr(new JacobianFactor(*this)); +} /* ************************************************************************* */ Vector GaussianConditional::solve(const VectorValues& x) const { diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 
454c56933..a9c788bce 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -118,6 +118,12 @@ public: rsd_type::constBlock get_S(const_iterator variable) const { return rsd_(variable - this->begin()); } const Vector& get_sigmas() const {return sigmas_;} + /** + * Copy to a Factor (this creates a JacobianFactor and returns it as its + * base class GaussianFactor. + */ + boost::shared_ptr toFactor() const; + /** * solve a conditional Gaussian * @param x values structure in which the parents values (y,z,...) are known @@ -137,7 +143,7 @@ protected: rsd_type::Block get_R_() { return rsd_(0); } rsd_type::Block get_S_(iterator variable) { return rsd_(variable - this->begin()); } - friend class GaussianFactor; + friend class JacobianFactor; private: // /** Serialization function */ diff --git a/gtsam/linear/GaussianFactor.cpp b/gtsam/linear/GaussianFactor.cpp index 4ae8798c6..bbee3ae69 100644 --- a/gtsam/linear/GaussianFactor.cpp +++ b/gtsam/linear/GaussianFactor.cpp @@ -13,1018 +13,99 @@ * @file GaussianFactor.cpp * @brief Linear Factor....A Gaussian * @brief linearFactor - * @author Christian Potthast + * @author Richard Roberts, Christian Potthast */ #include -#include -#include -#include -#include #include -#include +#include +#include +#include +#include -#include -#include -#include -#include -#include -#include -#include +#include -#include -#include -#include -#include -#include - -#include +#include using namespace std; -namespace ublas = boost::numeric::ublas; -using namespace boost::lambda; - namespace gtsam { -/* ************************************************************************* */ -inline void GaussianFactor::assertInvariants() const { -#ifndef NDEBUG - IndexFactor::assertInvariants(); - assert((keys_.size() == 0 && Ab_.size1() == 0 && Ab_.nBlocks() == 0) || keys_.size()+1 == Ab_.nBlocks()); -#endif -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(const GaussianFactor& gf) : - IndexFactor(gf), model_(gf.model_), firstNonzeroBlocks_(gf.firstNonzeroBlocks_), Ab_(matrix_) { - Ab_.assignNoalias(gf.Ab_); - assertInvariants(); -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor() : Ab_(matrix_) { assertInvariants(); } - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(const Vector& b_in) : firstNonzeroBlocks_(b_in.size(), 0), Ab_(matrix_) { - size_t dims[] = { 1 }; - Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+1, b_in.size())); - getb() = b_in; - assertInvariants(); -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(Index i1, const Matrix& A1, - const Vector& b, const SharedDiagonal& model) : - IndexFactor(i1), model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { - size_t dims[] = { A1.size2(), 1}; - Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+2, b.size())); - Ab_(0) = A1; - getb() = b; - assertInvariants(); -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2, - const Vector& b, const SharedDiagonal& model) : - IndexFactor(i1,i2), model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { - size_t dims[] = { A1.size2(), A2.size2(), 1}; - Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+3, b.size())); - Ab_(0) = A1; - Ab_(1) = A2; - getb() 
= b; - assertInvariants(); -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2, - Index i3, const Matrix& A3, const Vector& b, const SharedDiagonal& model) : - IndexFactor(i1,i2,i3), model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { - size_t dims[] = { A1.size2(), A2.size2(), A3.size2(), 1}; - Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+4, b.size())); - Ab_(0) = A1; - Ab_(1) = A2; - Ab_(2) = A3; - getb() = b; - assertInvariants(); -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(const std::vector > &terms, - const Vector &b, const SharedDiagonal& model) : - model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { - keys_.resize(terms.size()); - size_t dims[terms.size()+1]; - for(size_t j=0; j > &terms, - const Vector &b, const SharedDiagonal& model) : - model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { - keys_.resize(terms.size()); - size_t dims[terms.size()+1]; - size_t j=0; - for(std::list >::const_iterator term=terms.begin(); term!=terms.end(); ++term) { - keys_[j] = term->first; - dims[j] = term->second.size2(); - ++ j; - } - dims[j] = 1; - firstNonzeroBlocks_.resize(b.size(), 0); - Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+terms.size()+1, b.size())); - j = 0; - for(std::list >::const_iterator term=terms.begin(); term!=terms.end(); ++term) { - Ab_(j) = term->second; - ++ j; - } - getb() = b; - assertInvariants(); -} - -/* ************************************************************************* */ -GaussianFactor::GaussianFactor(const GaussianConditional& cg) : IndexFactor(cg), model_(noiseModel::Diagonal::Sigmas(cg.get_sigmas(), true)), Ab_(matrix_) { - Ab_.assignNoalias(cg.rsd_); - // todo SL: make firstNonzeroCols triangular? 
- firstNonzeroBlocks_.resize(cg.get_d().size(), 0); // set sigmas from precisions - assertInvariants(); -} - -/* ************************************************************************* */ -void GaussianFactor::print(const string& s) const { - cout << s << "\n"; - if (empty()) { - cout << " empty, keys: "; - BOOST_FOREACH(const Index key, keys_) { cout << key << " "; } - cout << endl; - } else { - for(const_iterator key=begin(); key!=end(); ++key) - gtsam::print(getA(key), (boost::format("A[%1%]=\n")%*key).str()); - gtsam::print(getb(),"b="); - model_->print("model"); - } -} - -/* ************************************************************************* */ -// Check if two linear factors are equal -bool GaussianFactor::equals(const GaussianFactor& f, double tol) const { - if (empty()) return (f.empty()); - if(keys_!=f.keys_ /*|| !model_->equals(lf->model_, tol)*/) - return false; - - assert(Ab_.size1() == f.Ab_.size1() && Ab_.size2() == f.Ab_.size2()); - - constABlock Ab1(Ab_.range(0, Ab_.nBlocks())); - constABlock Ab2(f.Ab_.range(0, f.Ab_.nBlocks())); - for(size_t row=0; row, boost::fast_pool_allocator > > SourceSlots; - SourceSlots sourceSlots; - for(size_t j=0; j dimensions(keys_.size() + 1); - size_t j = 0; - BOOST_FOREACH(const SourceSlots::value_type& sourceSlot, sourceSlots) { - dimensions[j++] = Ab_(sourceSlot.second).size2(); - } - assert(j == keys_.size()); - dimensions.back() = 1; - - // Copy the variables and matrix into the new order - vector oldKeys(keys_.size()); - keys_.swap(oldKeys); - AbMatrix oldMatrix; - BlockAb oldAb(oldMatrix, dimensions.begin(), dimensions.end(), Ab_.size1()); - Ab_.swap(oldAb); - j = 0; - BOOST_FOREACH(const SourceSlots::value_type& sourceSlot, sourceSlots) { - keys_[j] = sourceSlot.first; - ublas::noalias(Ab_(j++)) = oldAb(sourceSlot.second); - } - ublas::noalias(Ab_(j)) = oldAb(j); - - // Since we're permuting the variables, ensure that entire rows from this - // factor are copied when Combine is called - BOOST_FOREACH(size_t& varpos, firstNonzeroBlocks_) { varpos = 0; } - assertInvariants(); -} - -/* ************************************************************************* */ -Vector GaussianFactor::unweighted_error(const VectorValues& c) const { - Vector e = -getb(); - if (empty()) return e; - for(size_t pos=0; poswhiten(-getb()); - return model_->whiten(unweighted_error(c)); -} - -/* ************************************************************************* */ -double GaussianFactor::error(const VectorValues& c) const { - if (empty()) return 0; - Vector weighted = error_vector(c); - return 0.5 * inner_prod(weighted,weighted); -} - - -/* ************************************************************************* */ -Vector GaussianFactor::operator*(const VectorValues& x) const { - Vector Ax = zero(Ab_.size1()); - if (empty()) return Ax; - - // Just iterate over all A matrices and multiply in correct config part - for(size_t pos=0; poswhiten(Ax); -} - - -/* ************************************************************************* */ -void GaussianFactor::transposeMultiplyAdd(double alpha, const Vector& e, - VectorValues& x) const { - Vector E = alpha * model_->whiten(e); - // Just iterate over all A matrices and insert Ai^e into VectorValues - for(size_t pos=0; pos GaussianFactor::matrix(bool weight) const { - Matrix A(Ab_.range(0, keys_.size())); - Vector b(getb()); - // divide in sigma so error is indeed 0.5*|Ax-b| - if (weight) model_->WhitenSystem(A,b); - return make_pair(A, b); -} - -/* 
************************************************************************* */ -Matrix GaussianFactor::matrix_augmented(bool weight) const { - if (weight) { Matrix Ab(Ab_.range(0,Ab_.nBlocks())); model_->WhitenInPlace(Ab); return Ab; } - else return Ab_.range(0, Ab_.nBlocks()); -} - -/* ************************************************************************* */ -boost::tuple, list, list > -GaussianFactor::sparse(const Dimensions& columnIndices) const { - - // declare return values - list I,J; - list S; - - // iterate over all matrices in the factor - for(size_t pos=0; possigma(i); - for (size_t j = 0; j < A.size2(); j++) - if (A(i, j) != 0.0) { - I.push_back(i + 1); - J.push_back(j + column_start); - S.push_back(A(i, j) / sigma_i); - } - } - } - - // return the result - return boost::tuple, list, list >(I,J,S); -} - -/* ************************************************************************* */ -GaussianFactor GaussianFactor::whiten() const { - GaussianFactor result(*this); - result.model_->WhitenInPlace(result.matrix_); - result.model_ = noiseModel::Unit::Create(result.model_->dim()); - return result; -} - -/* ************************************************************************* */ -struct SlotEntry { - size_t slot; - size_t dimension; - SlotEntry(size_t _slot, size_t _dimension) : slot(_slot), dimension(_dimension) {} -}; - -typedef FastMap Scatter; - -/* ************************************************************************* */ -static FastMap findScatterAndDims(const FactorGraph& factors) { - - static const bool debug = false; - - // The "scatter" is a map from global variable indices to slot indices in the - // union of involved variables. We also include the dimensionality of the - // variable. - - Scatter scatter; - - // First do the set union. - BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) { - for(GaussianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) { - scatter.insert(make_pair(*variable, SlotEntry(0, factor->getDim(variable)))); - } - } - - // Next fill in the slot indices (we can only get these after doing the set - // union. - size_t slot = 0; - BOOST_FOREACH(Scatter::value_type& var_slot, scatter) { - var_slot.second.slot = (slot ++); - if(debug) - cout << "scatter[" << var_slot.first << "] = (slot " << var_slot.second.slot << ", dim " << var_slot.second.dimension << ")" << endl; - } - - return scatter; -} - -/* ************************************************************************* */ -static MatrixColMajor formAbTAb(const FactorGraph& factors, const Scatter& scatter) { - - static const bool debug = false; - - tic("CombineAndEliminate: 3.1 varStarts"); - // Determine scalar indices of each variable - vector varStarts; - varStarts.reserve(scatter.size() + 2); - varStarts.push_back(0); - BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) { - varStarts.push_back(varStarts.back() + var_slot.second.dimension); - } - // This is for the r.h.s. vector - varStarts.push_back(varStarts.back() + 1); - toc("CombineAndEliminate: 3.1 varStarts"); - - // Allocate and zero matrix for Ab' * Ab - MatrixColMajor ATA(ublas::zero_matrix(varStarts.back(), varStarts.back())); - - tic("CombineAndEliminate: 3.2 updates"); - // Do blockwise low-rank updates to Ab' * Ab for each factor. Here, we - // only update the upper triangle because this is all that Cholesky uses. 
- BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) { - - // Whiten the factor first so it has a unit diagonal noise model - GaussianFactor whitenedFactor(factor->whiten()); - - if(debug) whitenedFactor.print("whitened factor: "); - - for(GaussianFactor::const_iterator var2 = whitenedFactor.begin(); var2 != whitenedFactor.end(); ++var2) { - assert(scatter.find(*var2) != scatter.end()); - size_t vj = scatter.find(*var2)->second.slot; - for(GaussianFactor::const_iterator var1 = whitenedFactor.begin(); var1 <= var2; ++var1) { - assert(scatter.find(*var1) != scatter.end()); - size_t vi = scatter.find(*var1)->second.slot; - if(debug) cout << "Updating block " << vi << ", " << vj << endl; - if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " << - varStarts[vj] << ":" << varStarts[vj+1] << ") from A" << *var1 << "' * A" << *var2 << endl; - ublas::project(ATA, - ublas::range(varStarts[vi], varStarts[vi+1]), ublas::range(varStarts[vj], varStarts[vj+1])) += - ublas::prod(ublas::trans(whitenedFactor.getA(var1)), whitenedFactor.getA(var2)); - } - } - - // Update r.h.s. vector - size_t vj = scatter.size(); - for(GaussianFactor::const_iterator var1 = whitenedFactor.begin(); var1 < whitenedFactor.end(); ++var1) { - assert(scatter.find(*var1) != scatter.end()); - size_t vi = scatter.find(*var1)->second.slot; - if(debug) cout << "Updating block " << vi << ", " << vj << endl; - if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " << - varStarts[vj] << ":" << varStarts[vj+1] << ") from A" << *var1 << "' * b" << endl; - ublas::matrix_column col(ATA, varStarts[vj]); - ublas::subrange(col, varStarts[vi], varStarts[vi+1]) += - ublas::prod(ublas::trans(whitenedFactor.getA(var1)), whitenedFactor.getb()); - } - - size_t vi = scatter.size(); - if(debug) cout << "Updating block " << vi << ", " << vj << endl; - if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " << - varStarts[vj] << ":" << varStarts[vj+1] << ") from b" << "' * b" << endl; - ATA(varStarts[vi], varStarts[vj]) += ublas::inner_prod(whitenedFactor.getb(), whitenedFactor.getb()); - } - toc("CombineAndEliminate: 3.2 updates"); - - return ATA; -} - -GaussianBayesNet::shared_ptr GaussianFactor::splitEliminatedFactor(size_t nrFrontals, const vector& keys) { - - static const bool debug = false; - - const size_t maxrank = Ab_.size1(); - - // Check for rank-deficiency that would prevent back-substitution - if(maxrank < Ab_.range(0, nrFrontals).size2()) { - stringstream ss; - ss << "Problem is rank-deficient, discovered while eliminating frontal variables"; - for(size_t i=0; i 1) - for(size_t j = 0; j < remainingMatrix.size1() - 1; ++j) - memset(&remainingMatrix(j+1, j), 0, sizeof(remainingMatrix(0,0)) * (remainingMatrix.size1() - j - 1)); - } - - const ublas::scalar_vector sigmas(varDim, 1.0); - conditionals->push_back(boost::make_shared(keys.begin()+j, keys.end(), 1, Ab_, sigmas)); - if(debug) conditionals->back()->print("Extracted conditional: "); - Ab_.rowStart() += varDim; - Ab_.firstBlock() += 1; - if(debug) cout << "rowStart = " << Ab_.rowStart() << ", rowEnd = " << Ab_.rowEnd() << endl; - } - toc("CombineAndEliminate: 5.1 cond Rd"); - - // Take lower-right block of Ab_ to get the new factor - tic("CombineAndEliminate: 5.2 remaining factor"); - Ab_.rowEnd() = maxrank; - - // Assign the keys - keys_.assign(keys.begin() + nrFrontals, keys.end()); - - // Zero the entries below the diagonal (this relies on the matrix being - // column-major). 
- { - ABlock remainingMatrix(Ab_.range(0, Ab_.nBlocks())); - if(remainingMatrix.size1() > 1) - for(size_t j = 0; j < remainingMatrix.size1() - 1; ++j) - memset(&remainingMatrix(j+1, j), 0, sizeof(remainingMatrix(0,0)) * (remainingMatrix.size1() - j - 1)); - } - - // Make a unit diagonal noise model - model_ = noiseModel::Unit::Create(Ab_.size1()); - if(debug) this->print("Eliminated factor: "); - toc("CombineAndEliminate: 5.2 remaining factor"); - - // todo SL: deal with "dead" pivot columns!!! - tic("CombineAndEliminate: 5.3 rowstarts"); - size_t varpos = 0; - firstNonzeroBlocks_.resize(numberOfRows()); - for(size_t row=0; row GaussianFactor::CombineAndEliminate( - const FactorGraph& factors, size_t nrFrontals, SolveMethod solveMethod) { - - static const bool debug = false; - - SolveMethod correctedSolveMethod = solveMethod; - - // Check for constrained noise models - if(correctedSolveMethod != SOLVE_QR) { - BOOST_FOREACH(const shared_ptr& factor, factors) { - if(factor->model_->isConstrained()) { - correctedSolveMethod = SOLVE_QR; - break; - } - } - } - - if(correctedSolveMethod == SOLVE_QR) { - shared_ptr jointFactor(Combine(factors, VariableSlots(factors))); - GaussianBayesNet::shared_ptr gbn(jointFactor->eliminate(nrFrontals, SOLVE_QR)); - return make_pair(gbn, jointFactor); - } else if(correctedSolveMethod == SOLVE_CHOLESKY) { - - // Find the scatter and variable dimensions - tic("CombineAndEliminate: 1 find scatter"); - Scatter scatter(findScatterAndDims(factors)); - toc("CombineAndEliminate: 1 find scatter"); - - // Pull out keys and dimensions - tic("CombineAndEliminate: 2 keys"); - vector keys(scatter.size()); - vector dimensions(scatter.size() + 1); - BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) { - keys[var_slot.second.slot] = var_slot.first; - dimensions[var_slot.second.slot] = var_slot.second.dimension; - } - // This is for the r.h.s. vector - dimensions.back() = 1; - toc("CombineAndEliminate: 2 keys"); - - // Form Ab' * Ab - tic("CombineAndEliminate: 3 Ab'*Ab"); - MatrixColMajor ATA(formAbTAb(factors, scatter)); - if(debug) gtsam::print(ATA, "Ab' * Ab: "); - toc("CombineAndEliminate: 3 Ab'*Ab"); - - // Do Cholesky, note that after this, the lower triangle still contains - // some untouched non-zeros that should be zero. We zero them while - // extracting submatrices next. - tic("CombineAndEliminate: 4 Cholesky careful"); - size_t maxrank = choleskyCareful(ATA); - if(maxrank > ATA.size2() - 1) - maxrank = ATA.size2() - 1; - if(debug) { - gtsam::print(ATA, "chol(Ab' * Ab): "); - cout << "maxrank = " << maxrank << endl; - } - toc("CombineAndEliminate: 4 Cholesky careful"); - - // Create the remaining factor and swap in the matrix and block structure. - // We declare a reference Ab to the block matrix in the remaining factor to - // refer to below. 
- GaussianFactor::shared_ptr remainingFactor(new GaussianFactor()); - BlockAb& Ab(remainingFactor->Ab_); - { - BlockAb newAb(ATA, dimensions.begin(), dimensions.end()); - newAb.rowEnd() = maxrank; - newAb.swap(Ab); - } - - // Extract conditionals and fill in details of the remaining factor - tic("CombineAndEliminate: 5 Split"); - GaussianBayesNet::shared_ptr conditionals(remainingFactor->splitEliminatedFactor(nrFrontals, keys)); - if(debug) { - conditionals->print("Extracted conditionals: "); - remainingFactor->print("Eliminated factor: "); - } - toc("CombineAndEliminate: 5 Split"); - - return make_pair(conditionals, remainingFactor); - - } else { - assert(false); - return make_pair(GaussianBayesNet::shared_ptr(), shared_ptr()); - } -} - -/* ************************************************************************* */ -GaussianConditional::shared_ptr GaussianFactor::eliminateFirst(SolveMethod solveMethod) { - - assert(Ab_.rowStart() == 0 && Ab_.rowEnd() == matrix_.size1() && Ab_.firstBlock() == 0); - assert(!keys_.empty()); - assertInvariants(); - - static const bool debug = false; - - tic("eliminateFirst"); - - if(debug) print("Eliminating GaussianFactor: "); - - tic("eliminateFirst: stairs"); - // Translate the left-most nonzero column indices into top-most zero row indices - vector firstZeroRows(Ab_.size2()); - { - size_t lastNonzeroRow = 0; - vector::iterator firstZeroRowsIt = firstZeroRows.begin(); - for(size_t var=0; varnumberOfRows() && firstNonzeroBlocks_[lastNonzeroRow] <= var) - ++ lastNonzeroRow; - fill(firstZeroRowsIt, firstZeroRowsIt+Ab_(var).size2(), lastNonzeroRow); - firstZeroRowsIt += Ab_(var).size2(); - } - assert(firstZeroRowsIt+1 == firstZeroRows.end()); - *firstZeroRowsIt = this->numberOfRows(); - } - toc("eliminateFirst: stairs"); - -#ifndef NDEBUG - for(size_t col=0; colisConstrained()) - correctedSolveMethod = SOLVE_QR; - else - correctedSolveMethod = solveMethod; - - // Use in-place QR or Cholesky on dense Ab appropriate to NoiseModel - SharedDiagonal noiseModel; - if(correctedSolveMethod == SOLVE_QR) { - tic("eliminateFirst: QR"); - noiseModel = model_->QRColumnWise(matrix_, firstZeroRows); - toc("eliminateFirst: QR"); - } else if(correctedSolveMethod == SOLVE_CHOLESKY) { - tic("eliminateFirst: Cholesky"); - noiseModel = model_->Cholesky(matrix_, firstVarDim); - Ab_.rowEnd() = noiseModel->dim(); - toc("eliminateFirst: Cholesky"); - } else - assert(false); - - if(matrix_.size1() > 0) { - for(size_t j=0; jdim(); ++i) - matrix_(i,j) = 0.0; - } - - if(debug) gtsam::print(matrix_, "QR result: "); - - // Check for singular factor - if(noiseModel->dim() < firstVarDim) { - throw domain_error((boost::format( - "GaussianFactor is singular in variable %1%, discovered while attempting\n" - "to eliminate this variable.") % keys_.front()).str()); - } - - // Extract conditional - // todo SL: are we pulling Householder vectors into the conditional and does it matter? - tic("eliminateFirst: cond Rd"); - // Temporarily restrict the matrix view to the conditional blocks of the - // eliminated Ab matrix to create the GaussianConditional from it. 
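A hypothetical miniature (editor's illustration, not the patch) of the view bookkeeping described above: the same dense matrix is reinterpreted first as the conditional rows and then as the remaining factor, purely by moving rowStart/rowEnd/firstBlock, with no copying.

#include <cstddef>
#include <iostream>

// Stand-in for the view state of a VerticalBlockView-like class.
struct ViewWindow { std::size_t rowStart, rowEnd, firstBlock; };

int main() {
  const std::size_t firstVarDim = 3, noiseDim = 7;
  // Restrict to the frontal (conditional) part: rows [0, firstVarDim), all blocks.
  ViewWindow conditional = {0, firstVarDim, 0};
  // Then advance the window to the remaining factor: rows [firstVarDim, noiseDim),
  // blocks starting at 1.
  ViewWindow remaining = {firstVarDim, noiseDim, 1};
  std::cout << "conditional rows: [" << conditional.rowStart << "," << conditional.rowEnd << ")\n"
            << "remaining rows:   [" << remaining.rowStart << "," << remaining.rowEnd << ")\n";
  return 0;
}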
- Ab_.rowStart() = 0; - Ab_.rowEnd() = firstVarDim; - Ab_.firstBlock() = 0; - const ublas::vector_range sigmas(noiseModel->sigmas(), ublas::range(0, firstVarDim)); - GaussianConditional::shared_ptr conditional(new GaussianConditional( - keys_.begin(), keys_.end(), 1, Ab_, sigmas)); - toc("eliminateFirst: cond Rd"); - - if(debug) conditional->print("Extracted conditional: "); - - tic("eliminateFirst: remaining factor"); - // Take lower-right block of Ab to get the new factor - Ab_.rowStart() = firstVarDim; - Ab_.rowEnd() = noiseModel->dim(); - Ab_.firstBlock() = 1; - keys_.erase(keys_.begin()); - // Set sigmas with the right model - if (noiseModel->isConstrained()) - model_ = noiseModel::Constrained::MixedSigmas(sub(noiseModel->sigmas(), firstVarDim, noiseModel->dim())); - else - model_ = noiseModel::Diagonal::Sigmas(sub(noiseModel->sigmas(), firstVarDim, noiseModel->dim())); - if(correctedSolveMethod == SOLVE_QR) assert(Ab_.size1() <= Ab_.size2()-1); - toc("eliminateFirst: remaining factor"); - - // todo SL: deal with "dead" pivot columns!!! - tic("eliminateFirst: rowstarts"); - size_t varpos = 0; - firstNonzeroBlocks_.resize(this->numberOfRows()); - for(size_t row=0; rowkeys_.size() && Ab_.offset(varpos+1) <= row) - ++ varpos; - firstNonzeroBlocks_[row] = varpos; - if(debug) cout << "firstNonzeroVars_[" << row << "] = " << firstNonzeroBlocks_[row] << endl; - // if(debug && varpos < size()) { - // ABlock block(Ab_(varpos)); - // assert(!gtsam::equal_with_abs_tol(ublas::row(block, row), zero(block.size2()), 1e-5)); - // } - } - toc("eliminateFirst: rowstarts"); - - if(debug) print("Eliminated factor: "); - - toc("eliminateFirst"); - - assertInvariants(); - - return conditional; -} - -/* ************************************************************************* */ -GaussianBayesNet::shared_ptr GaussianFactor::eliminate(size_t nrFrontals, SolveMethod solveMethod) { - - assert(Ab_.rowStart() == 0 && Ab_.rowEnd() == matrix_.size1() && Ab_.firstBlock() == 0); - assert(keys_.size() >= nrFrontals); - assertInvariants(); - - static const bool debug = false; - - tic("eliminate"); - - if(debug) cout << "Eliminating " << nrFrontals << " frontal variables" << endl; - if(debug) this->print("Eliminating GaussianFactor: "); - - tic("eliminate: stairs"); - // Translate the left-most nonzero column indices into top-most zero row indices - vector firstZeroRows(Ab_.size2()); - { - size_t lastNonzeroRow = 0; - vector::iterator firstZeroRowsIt = firstZeroRows.begin(); - for(size_t var=0; varnumberOfRows() && firstNonzeroBlocks_[lastNonzeroRow] <= var) - ++ lastNonzeroRow; - fill(firstZeroRowsIt, firstZeroRowsIt+Ab_(var).size2(), lastNonzeroRow); - firstZeroRowsIt += Ab_(var).size2(); - } - assert(firstZeroRowsIt+1 == firstZeroRows.end()); - *firstZeroRowsIt = this->numberOfRows(); - } - toc("eliminate: stairs"); - -#ifndef NDEBUG - for(size_t col=0; colQRColumnWise(matrix_, firstZeroRows); - toc("eliminateFirst: QR"); - } else if(correctedSolveMethod == SOLVE_CHOLESKY) { - tic("eliminateFirst: Cholesky"); - noiseModel = model_->Cholesky(matrix_, frontalDim); - Ab_.rowEnd() = noiseModel->dim(); - toc("eliminateFirst: Cholesky"); - } else - assert(false); - - // Zero the lower-left triangle. todo: not all of these entries actually - // need to be zeroed if we are careful to start copying rows after the last - // structural zero. 
- if(matrix_.size1() > 0) { - for(size_t j=0; jdim(); ++i) - matrix_(i,j) = 0.0; - } - - if(debug) gtsam::print(matrix_, "QR result: "); - - // Check for singular factor - if(noiseModel->dim() < frontalDim) { - throw domain_error((boost::format( - "GaussianFactor is singular in variable %1%, discovered while attempting\n" - "to eliminate this variable.") % keys_.front()).str()); - } - - // Extract conditionals - tic("eliminate: cond Rd"); - GaussianBayesNet::shared_ptr conditionals(new GaussianBayesNet()); - for(size_t j=0; j sigmas(noiseModel->sigmas(), ublas::range(Ab_.rowStart(), Ab_.rowEnd())); - conditionals->push_back(boost::make_shared(keys_.begin()+j, keys_.end(), 1, Ab_, sigmas)); - if(debug) conditionals->back()->print("Extracted conditional: "); - Ab_.rowStart() += varDim; - Ab_.firstBlock() += 1; - } - toc("eliminate: cond Rd"); - - if(debug) conditionals->print("Extracted conditionals: "); - - tic("eliminate: remaining factor"); - // Take lower-right block of Ab to get the new factor - Ab_.rowEnd() = noiseModel->dim(); - keys_.assign(keys_.begin() + nrFrontals, keys_.end()); - // Set sigmas with the right model - if (noiseModel->isConstrained()) - model_ = noiseModel::Constrained::MixedSigmas(sub(noiseModel->sigmas(), frontalDim, noiseModel->dim())); - else - model_ = noiseModel::Diagonal::Sigmas(sub(noiseModel->sigmas(), frontalDim, noiseModel->dim())); - if(debug) this->print("Eliminated factor: "); - if(correctedSolveMethod == SOLVE_QR) assert(Ab_.size1() <= Ab_.size2()-1); - toc("eliminate: remaining factor"); - - // todo SL: deal with "dead" pivot columns!!! - tic("eliminate: rowstarts"); - size_t varpos = 0; - firstNonzeroBlocks_.resize(this->numberOfRows()); - for(size_t row=0; row, size_t, size_t> countDims(const std::vector& factors, const VariableSlots& variableSlots) { -#ifndef NDEBUG - vector varDims(variableSlots.size(), numeric_limits::max()); -#else - vector varDims(variableSlots.size()); -#endif - size_t m = 0; - size_t n = 0; - { - Index jointVarpos = 0; - BOOST_FOREACH(const VariableSlots::value_type& slots, variableSlots) { - - assert(slots.second.size() == factors.size()); - - Index sourceFactorI = 0; - BOOST_FOREACH(const Index sourceVarpos, slots.second) { - if(sourceVarpos < numeric_limits::max()) { - const GaussianFactor& sourceFactor = *factors[sourceFactorI]; - size_t vardim = sourceFactor.getDim(sourceFactor.begin() + sourceVarpos); -#ifndef NDEBUG - if(varDims[jointVarpos] == numeric_limits::max()) { - varDims[jointVarpos] = vardim; - n += vardim; - } else - assert(varDims[jointVarpos] == vardim); -#else - varDims[jointVarpos] = vardim; - n += vardim; + /* ************************************************************************* */ + pair GaussianFactor::CombineAndEliminate( + const FactorGraph& factors, size_t nrFrontals, SolveMethod solveMethod) { + + // If any JacobianFactors have constrained noise models, we have to convert + // all factors to JacobianFactors. Otherwise, we can convert all factors + // to HessianFactors. This is because QR can handle constrained noise + // models but Cholesky cannot. + + // Decide whether to use QR or Cholesky + bool useQR; + if(solveMethod == SOLVE_QR) { + useQR = true; + } else if(solveMethod == SOLVE_PREFER_CHOLESKY) { + // Check if any JacobianFactors have constrained noise models. 
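The constrained-model scan that follows lost its template arguments in this rendering of the patch. As an editor's sketch (header paths and the FactorGraph<GaussianFactor> instantiation are assumptions), the decision rule it implements reads:

#include <boost/foreach.hpp>
#include <boost/shared_ptr.hpp>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/JacobianFactor.h>

// Returns true if any factor is a JacobianFactor with a constrained noise
// model; constrained models require the QR path, since the Cholesky
// (Hessian) path cannot represent them.
static bool needsQR(const gtsam::FactorGraph<gtsam::GaussianFactor>& factors) {
  BOOST_FOREACH(const gtsam::GaussianFactor::shared_ptr& factor, factors) {
    gtsam::JacobianFactor::shared_ptr jf =
        boost::dynamic_pointer_cast<gtsam::JacobianFactor>(factor);
    if (jf && jf->get_model()->isConstrained())
      return true;
  }
  return false;
}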
+ useQR = false; + BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) { + JacobianFactor::shared_ptr jacobianFactor(boost::dynamic_pointer_cast(factor)); + if(jacobianFactor && jacobianFactor->get_model()->isConstrained()) { + useQR = true; break; -#endif } - ++ sourceFactorI; } - ++ jointVarpos; } - BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) { - m += factor->numberOfRows(); + + // Convert all factors to the appropriate type and call the type-specific CombineAndEliminate. + if(useQR) { + tic(1, "convert to Jacobian"); + FactorGraph jacobianFactors; + jacobianFactors.reserve(factors.size()); + BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) { + if(factor) { + JacobianFactor::shared_ptr jacobianFactor(boost::dynamic_pointer_cast(factor)); + if(jacobianFactor) + jacobianFactors.push_back(jacobianFactor); + else { + HessianFactor::shared_ptr hessianFactor(boost::dynamic_pointer_cast(factor)); + if(!hessianFactor) throw std::invalid_argument( + "In GaussianFactor::CombineAndEliminate, factor is neither a JacobianFactor nor a HessianFactor."); + jacobianFactors.push_back(JacobianFactor::shared_ptr(new JacobianFactor(*hessianFactor))); + } + } + } + toc(1, "convert to Jacobian"); + tic(2, "Jacobian CombineAndEliminate"); + pair ret( + JacobianFactor::CombineAndEliminate(jacobianFactors, nrFrontals)); + toc(2, "Jacobian CombineAndEliminate"); + return ret; + + } else { + tic(1, "convert to Hessian"); + FactorGraph hessianFactors; + hessianFactors.reserve(factors.size()); + BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) { + if(factor) { + HessianFactor::shared_ptr hessianFactor(boost::dynamic_pointer_cast(factor)); + if(hessianFactor) + hessianFactors.push_back(hessianFactor); + else { + JacobianFactor::shared_ptr jacobianFactor(boost::dynamic_pointer_cast(factor)); + if(!jacobianFactor) throw std::invalid_argument( + "In GaussianFactor::CombineAndEliminate, factor is neither a JacobianFactor nor a HessianFactor."); + hessianFactors.push_back(HessianFactor::shared_ptr(new HessianFactor(*jacobianFactor))); + } + } + } + toc(1, "convert to Hessian"); + tic(2, "Hessian CombineAndEliminate"); + pair ret( + HessianFactor::CombineAndEliminate(hessianFactors, nrFrontals)); + toc(2, "Hessian CombineAndEliminate"); + return ret; } + } - return boost::make_tuple(varDims, m, n); -} - -/* ************************************************************************* */ -GaussianFactor::shared_ptr GaussianFactor::Combine(const FactorGraph& factors, const VariableSlots& variableSlots) { - - static const bool verbose = false; - static const bool debug = false; - if (verbose) std::cout << "GaussianFactor::GaussianFactor (factors)" << std::endl; - - if(debug) factors.print("Combining factors: "); - - if(debug) variableSlots.print(); - - // Determine dimensions - tic("Combine 1: countDims"); - vector varDims; - size_t m; - size_t n; - boost::tie(varDims, m, n) = countDims(factors, variableSlots); - if(debug) { - cout << "Dims: " << m << " x " << n << "\n"; - BOOST_FOREACH(const size_t dim, varDims) { cout << dim << " "; } - cout << endl; - } - toc("Combine 1: countDims"); - - // Sort rows - tic("Combine 2: sort rows"); - vector<_RowSource> rowSources; rowSources.reserve(m); - bool anyConstrained = false; - for(size_t sourceFactorI = 0; sourceFactorI < factors.size(); ++sourceFactorI) { - const GaussianFactor& sourceFactor(*factors[sourceFactorI]); - for(size_t sourceFactorRow = 0; sourceFactorRow < sourceFactor.numberOfRows(); ++sourceFactorRow) { - 
Index firstNonzeroVar; - firstNonzeroVar = sourceFactor.keys_[sourceFactor.firstNonzeroBlocks_[sourceFactorRow]]; - rowSources.push_back(_RowSource(firstNonzeroVar, sourceFactorI, sourceFactorRow)); - } - if(sourceFactor.model_->isConstrained()) anyConstrained = true; - } - assert(rowSources.size() == m); - std::sort(rowSources.begin(), rowSources.end()); - toc("Combine 2: sort rows"); - - // Allocate new factor - tic("Combine 3: allocate"); - shared_ptr combined(new GaussianFactor()); - combined->keys_.resize(variableSlots.size()); - std::transform(variableSlots.begin(), variableSlots.end(), combined->keys_.begin(), bind(&VariableSlots::const_iterator::value_type::first, boost::lambda::_1)); - varDims.push_back(1); - combined->Ab_.copyStructureFrom(BlockAb(combined->matrix_, varDims.begin(), varDims.end(), m)); - combined->firstNonzeroBlocks_.resize(m); - Vector sigmas(m); - toc("Combine 3: allocate"); - - // Copy rows - tic("Combine 4: copy rows"); - Index combinedSlot = 0; - BOOST_FOREACH(const VariableSlots::value_type& varslot, variableSlots) { - for(size_t row = 0; row < m; ++row) { - const Index sourceSlot = varslot.second[rowSources[row].factorI]; - ABlock combinedBlock(combined->Ab_(combinedSlot)); - if(sourceSlot != numeric_limits::max()) { - const GaussianFactor& source(*factors[rowSources[row].factorI]); - const size_t sourceRow = rowSources[row].factorRowI; - if(source.firstNonzeroBlocks_[sourceRow] <= sourceSlot) { - const constABlock sourceBlock(source.Ab_(sourceSlot)); - ublas::noalias(ublas::row(combinedBlock, row)) = ublas::row(sourceBlock, sourceRow); - } else - ublas::noalias(ublas::row(combinedBlock, row)) = ublas::zero_vector(combinedBlock.size2()); - } else - ublas::noalias(ublas::row(combinedBlock, row)) = ublas::zero_vector(combinedBlock.size2()); - } - ++ combinedSlot; - } - toc("Combine 4: copy rows"); - - // Copy rhs (b), sigma, and firstNonzeroBlocks - tic("Combine 5: copy vectors"); - Index firstNonzeroSlot = 0; - for(size_t row = 0; row < m; ++row) { - const GaussianFactor& source(*factors[rowSources[row].factorI]); - const size_t sourceRow = rowSources[row].factorRowI; - combined->getb()(row) = source.getb()(sourceRow); - sigmas(row) = source.get_sigmas()(sourceRow); - while(firstNonzeroSlot < variableSlots.size() && rowSources[row].firstNonzeroVar > combined->keys_[firstNonzeroSlot]) - ++ firstNonzeroSlot; - combined->firstNonzeroBlocks_[row] = firstNonzeroSlot; - } - toc("Combine 5: copy vectors"); - - // Create noise model from sigmas - tic("Combine 6: noise model"); - if(anyConstrained) combined->model_ = noiseModel::Constrained::MixedSigmas(sigmas); - else combined->model_ = noiseModel::Diagonal::Sigmas(sigmas); - toc("Combine 6: noise model"); - - combined->assertInvariants(); - - return combined; -} } diff --git a/gtsam/linear/GaussianFactor.h b/gtsam/linear/GaussianFactor.h index fd9d154a4..1a5b8696d 100644 --- a/gtsam/linear/GaussianFactor.h +++ b/gtsam/linear/GaussianFactor.h @@ -13,42 +13,24 @@ * @file GaussianFactor.h * @brief Linear Factor....A Gaussian * @brief linearFactor - * @author Christian Potthast + * @author Richard Roberts, Christian Potthast */ // \callgraph #pragma once -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include #include -#include -#include -#include #include +#include + +#include +#include namespace gtsam { - /** A map from key to dimension, useful in various contexts */ - typedef std::map 
Dimensions; + class VectorValues; + class Permutation; /** * Base Class for a linear factor. @@ -59,201 +41,92 @@ namespace gtsam { protected: - typedef boost::numeric::ublas::matrix AbMatrix; - typedef VerticalBlockView BlockAb; - - public: - typedef GaussianConditional Conditional; - typedef boost::shared_ptr shared_ptr; - typedef BlockAb::Block ABlock; - typedef BlockAb::constBlock constABlock; - typedef BlockAb::Column BVector; - typedef BlockAb::constColumn constBVector; - - enum SolveMethod { SOLVE_QR, SOLVE_CHOLESKY }; - - protected: - SharedDiagonal model_; // Gaussian noise model with diagonal covariance matrix - std::vector firstNonzeroBlocks_; - AbMatrix matrix_; // the full matrix corresponding to the factor - BlockAb Ab_; // the block view of the full matrix - - public: - /** Copy constructor */ - GaussianFactor(const GaussianFactor& gf); + GaussianFactor(const This& f) : IndexFactor(f) {} - /** default constructor for I/O */ - GaussianFactor(); + /** Construct from derived type */ + GaussianFactor(const GaussianConditional& c) : IndexFactor(c) {} - /** Construct Null factor */ - GaussianFactor(const Vector& b_in); + /** Constructor from a collection of keys */ + template GaussianFactor(KeyIterator beginKey, KeyIterator endKey) : + Base(beginKey, endKey) {} + + /** Default constructor for I/O */ + GaussianFactor() {} /** Construct unary factor */ - GaussianFactor(Index i1, const Matrix& A1, - const Vector& b, const SharedDiagonal& model); + GaussianFactor(Index j) : IndexFactor(j) {} /** Construct binary factor */ - GaussianFactor(Index i1, const Matrix& A1, - Index i2, const Matrix& A2, - const Vector& b, const SharedDiagonal& model); + GaussianFactor(Index j1, Index j2) : IndexFactor(j1, j2) {} /** Construct ternary factor */ - GaussianFactor(Index i1, const Matrix& A1, Index i2, - const Matrix& A2, Index i3, const Matrix& A3, - const Vector& b, const SharedDiagonal& model); + GaussianFactor(Index j1, Index j2, Index j3) : IndexFactor(j1, j2, j3) {} - /** Construct an n-ary factor */ - GaussianFactor(const std::vector > &terms, - const Vector &b, const SharedDiagonal& model); + /** Construct 4-way factor */ + GaussianFactor(Index j1, Index j2, Index j3, Index j4) : IndexFactor(j1, j2, j3, j4) {} - GaussianFactor(const std::list > &terms, - const Vector &b, const SharedDiagonal& model); + /** Construct n-way factor */ + GaussianFactor(const std::set& js) : IndexFactor(js) {} - /** Construct from Conditional Gaussian */ - GaussianFactor(const GaussianConditional& cg); + public: + + enum SolveMethod { SOLVE_QR, SOLVE_PREFER_CHOLESKY }; + + typedef GaussianConditional Conditional; + typedef boost::shared_ptr shared_ptr; // Implementing Testable interface - void print(const std::string& s = "") const; - bool equals(const GaussianFactor& lf, double tol = 1e-9) const; + virtual void print(const std::string& s = "") const = 0; + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const = 0; - Vector unweighted_error(const VectorValues& c) const; /** (A*x-b) */ - Vector error_vector(const VectorValues& c) const; /** (A*x-b)/sigma */ - double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */ + virtual double error(const VectorValues& c) const = 0; /** 0.5*(A*x-b)'*D*(A*x-b) */ - /** Check if the factor contains no information, i.e. zero rows. This does - * not necessarily mean that the factor involves no variables (to check for - * involving no variables use keys().empty()). 
- */ - bool empty() const { return Ab_.size1() == 0;} - - /** - * return the number of rows in the corresponding linear system - */ - size_t size1() const { return Ab_.size1(); } - - /** - * return the number of columns in the corresponding linear system - */ - size_t size2() const { return Ab_.size2(); } - - - /** Get a view of the r.h.s. vector b */ - constBVector getb() const { return Ab_.column(size(), 0); } - - /** Get a view of the A matrix for the variable pointed to be the given key iterator */ - constABlock getA(const_iterator variable) const { return Ab_(variable - keys_.begin()); } - - BVector getb() { return Ab_.column(size(), 0); } - - ABlock getA(iterator variable) { return Ab_(variable - keys_.begin()); } - - /** Return the dimension of the variable pointed to by the given key iterator - * todo: Remove this in favor of keeping track of dimensions with variables? - */ - size_t getDim(const_iterator variable) const { return Ab_(variable - keys_.begin()).size2(); } + /** Return the dimension of the variable pointed to by the given key iterator */ + virtual size_t getDim(const_iterator variable) const = 0; /** * Permutes the GaussianFactor, but for efficiency requires the permutation * to already be inverted. This acts just as a change-of-name for each * variable. The order of the variables within the factor is not changed. */ - void permuteWithInverse(const Permutation& inversePermutation); - - /** - * Whiten the matrix and r.h.s. so that the noise model is unit diagonal. - * This throws an exception if the noise model cannot whiten, e.g. if it is - * constrained. - */ - GaussianFactor whiten() const; - - /** - * Named constructor for combining a set of factors with pre-computed set of variables. - */ - static shared_ptr Combine(const FactorGraph& factors, const VariableSlots& variableSlots); + virtual void permuteWithInverse(const Permutation& inversePermutation) = 0; /** * Combine and eliminate several factors. */ static std::pair CombineAndEliminate( - const FactorGraph& factors, size_t nrFrontals=1, SolveMethod solveMethod = SOLVE_QR); - - protected: - - /** Internal debug check to make sure variables are sorted */ - void assertInvariants() const; - - /** Internal helper function to extract conditionals from a factor that was - * just numerically eliminated. - */ - GaussianBayesNet::shared_ptr splitEliminatedFactor(size_t nrFrontals, const std::vector& keys); - - public: - - /** access the sigmas */ - const Vector& get_sigmas() const { return model_->sigmas(); } - - /** access the noise model */ - const SharedDiagonal& get_model() const { return model_; } - - /** access the noise model (non-const version) */ - SharedDiagonal& get_model() { return model_; } - - /** get the indices list */ - const std::vector& get_firstNonzeroBlocks() const { return firstNonzeroBlocks_; } - - /** whether the noise model of this factor is constrained (i.e. 
contains any sigmas of 0.0) */ - bool isConstrained() const {return model_->isConstrained();} - - /** - * return the number of rows from the b vector - * @return a integer with the number of rows from the b vector - */ - size_t numberOfRows() const { return Ab_.size1(); } - - /** Return A*x */ - Vector operator*(const VectorValues& x) const; - - - /** x += A'*e */ - void transposeMultiplyAdd(double alpha, const Vector& e, VectorValues& x) const; - - /** - * Return (dense) matrix associated with factor - * @param ordering of variables needed for matrix column order - * @param set weight to true to bake in the weights - */ - std::pair matrix(bool weight = true) const; - - /** - * Return (dense) matrix associated with factor - * The returned system is an augmented matrix: [A b] - * @param ordering of variables needed for matrix column order - * @param set weight to use whitening to bake in weights - */ - Matrix matrix_augmented(bool weight = true) const; - - /** - * Return vectors i, j, and s to generate an m-by-n sparse matrix - * such that S(i(k),j(k)) = s(k), which can be given to MATLAB's sparse. - * As above, the standard deviations are baked into A and b - * @param first column index for each variable - */ - boost::tuple, std::list, std::list > - sparse(const Dimensions& columnIndices) const; - - /* ************************************************************************* */ - // MUTABLE functions. FD:on the path to being eradicated - /* ************************************************************************* */ - - GaussianConditional::shared_ptr eliminateFirst(SolveMethod solveMethod = SOLVE_QR); - - GaussianBayesNet::shared_ptr eliminate(size_t nrFrontals = 1, SolveMethod solveMethod = SOLVE_QR); - - void set_firstNonzeroBlocks(size_t row, size_t varpos) { firstNonzeroBlocks_[row] = varpos; } + const FactorGraph& factors, size_t nrFrontals=1, SolveMethod solveMethod=SOLVE_PREFER_CHOLESKY); }; // GaussianFactor + /** unnormalized error */ + template + double gaussianError(const FactorGraph& fg, const VectorValues& x) { + double total_error = 0.; + BOOST_FOREACH(const typename FACTOR::shared_ptr& factor, fg) { + total_error += factor->error(x); + } + return total_error; + } + + /** return A*x-b */ + template + Errors gaussianErrors(const FactorGraph& fg, const VectorValues& x) { + return *gaussianErrors_(fg, x); + } + + /** shared pointer version */ + template + boost::shared_ptr gaussianErrors_(const FactorGraph& fg, const VectorValues& x) { + boost::shared_ptr e(new Errors); + BOOST_FOREACH(const typename FACTOR::shared_ptr& factor, fg) { + e->push_back(factor->error_vector(x)); + } + return e; + } + + } // namespace gtsam diff --git a/gtsam/linear/GaussianFactorGraph.cpp b/gtsam/linear/GaussianFactorGraph.cpp index ede5571a8..ca06a9115 100644 --- a/gtsam/linear/GaussianFactorGraph.cpp +++ b/gtsam/linear/GaussianFactorGraph.cpp @@ -60,69 +60,6 @@ namespace gtsam { } } - /* ************************************************************************* */ - double GaussianFactorGraph::error(const VectorValues& x) const { - double total_error = 0.; - BOOST_FOREACH(sharedFactor factor,factors_) - total_error += factor->error(x); - return total_error; - } - - /* ************************************************************************* */ - Errors GaussianFactorGraph::errors(const VectorValues& x) const { - return *errors_(x); - } - - /* ************************************************************************* */ - boost::shared_ptr GaussianFactorGraph::errors_(const VectorValues& x) const { - 
boost::shared_ptr e(new Errors); - BOOST_FOREACH(const sharedFactor& factor,factors_) - e->push_back(factor->error_vector(x)); - return e; - } - - /* ************************************************************************* */ - Errors GaussianFactorGraph::operator*(const VectorValues& x) const { - Errors e; - BOOST_FOREACH(const sharedFactor& Ai,factors_) - e.push_back((*Ai)*x); - return e; - } - - /* ************************************************************************* */ - void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, Errors& e) const { - multiplyInPlace(x,e.begin()); - } - - /* ************************************************************************* */ - void GaussianFactorGraph::multiplyInPlace(const VectorValues& x, - const Errors::iterator& e) const { - Errors::iterator ei = e; - BOOST_FOREACH(const sharedFactor& Ai,factors_) { - *ei = (*Ai)*x; - ei++; - } - } - - - /* ************************************************************************* */ - // x += alpha*A'*e - void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e, - VectorValues& x) const { - // For each factor add the gradient contribution - Errors::const_iterator ei = e.begin(); - BOOST_FOREACH(const sharedFactor& Ai,factors_) - Ai->transposeMultiplyAdd(alpha,*(ei++),x); - } - - /* ************************************************************************* */ - VectorValues GaussianFactorGraph::gradient(const VectorValues& x) const { - // It is crucial for performance to make a zero-valued clone of x - VectorValues g = VectorValues::zero(x); - transposeMultiplyAdd(1.0, errors(x), g); - return g; - } - /* ************************************************************************* */ void GaussianFactorGraph::combine(const GaussianFactorGraph &lfg){ for(const_iterator factor=lfg.factors_.begin(); factor!=lfg.factors_.end(); factor++){ @@ -145,60 +82,29 @@ namespace gtsam { return fg; } - void GaussianFactorGraph::residual(const VectorValues &x, VectorValues &r) const { - - getb(r) ; - VectorValues Ax = VectorValues::SameStructure(r) ; - multiply(x,Ax) ; - axpy(-1.0,Ax,r) ; - } - - void GaussianFactorGraph::multiply(const VectorValues &x, VectorValues &r) const { - - r.makeZero() ; - Index i = 0 ; - BOOST_FOREACH(const sharedFactor& factor, factors_) { - for(GaussianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) { - r[i] += prod(factor->getA(j), x[*j]); - } - ++i ; - } - } - - void GaussianFactorGraph::transposeMultiply(const VectorValues &r, VectorValues &x) const { - x.makeZero() ; - Index i = 0 ; - BOOST_FOREACH(const sharedFactor& factor, factors_) { - for(GaussianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) { - x[*j] += prod(trans(factor->getA(j)), r[i]) ; - } - ++i ; - } - } - - VectorValues GaussianFactorGraph::allocateVectorValuesb() const { - std::vector dimensions(size()) ; - Index i = 0 ; - BOOST_FOREACH( const sharedFactor& factor, factors_) { - dimensions[i] = factor->numberOfRows() ; - i++; - } - - return VectorValues(dimensions) ; - } - - void GaussianFactorGraph::getb(VectorValues &b) const { - Index i = 0 ; - BOOST_FOREACH( const sharedFactor& factor, factors_) { - b[i] = factor->getb() ; - i++; - } - } - - VectorValues GaussianFactorGraph::getb() const { - VectorValues b = allocateVectorValuesb() ; - getb(b) ; - return b ; - } +// VectorValues GaussianFactorGraph::allocateVectorValuesb() const { +// std::vector dimensions(size()) ; +// Index i = 0 ; +// BOOST_FOREACH( const sharedFactor& factor, factors_) { +// 
dimensions[i] = factor->numberOfRows() ; +// i++; +// } +// +// return VectorValues(dimensions) ; +// } +// +// void GaussianFactorGraph::getb(VectorValues &b) const { +// Index i = 0 ; +// BOOST_FOREACH( const sharedFactor& factor, factors_) { +// b[i] = factor->getb(); +// i++; +// } +// } +// +// VectorValues GaussianFactorGraph::getb() const { +// VectorValues b = allocateVectorValuesb() ; +// getb(b) ; +// return b ; +// } } // namespace gtsam diff --git a/gtsam/linear/GaussianFactorGraph.h b/gtsam/linear/GaussianFactorGraph.h index f8738f638..71d1d2088 100644 --- a/gtsam/linear/GaussianFactorGraph.h +++ b/gtsam/linear/GaussianFactorGraph.h @@ -24,7 +24,9 @@ #include #include #include +#include #include +#include namespace gtsam { @@ -55,28 +57,22 @@ namespace gtsam { push_back(fg); } - /* dummy constructor, to be compatible with conjugate gradient solver */ - template - GaussianFactorGraph(const FactorGraph& fg, const VectorValues &x0) { - push_back(fg); - } - /** Add a null factor */ void add(const Vector& b) { - push_back(sharedFactor(new GaussianFactor(b))); + push_back(sharedFactor(new JacobianFactor(b))); } /** Add a unary factor */ void add(Index key1, const Matrix& A1, const Vector& b, const SharedDiagonal& model) { - push_back(sharedFactor(new GaussianFactor(key1,A1,b,model))); + push_back(sharedFactor(new JacobianFactor(key1,A1,b,model))); } /** Add a binary factor */ void add(Index key1, const Matrix& A1, Index key2, const Matrix& A2, const Vector& b, const SharedDiagonal& model) { - push_back(sharedFactor(new GaussianFactor(key1,A1,key2,A2,b,model))); + push_back(sharedFactor(new JacobianFactor(key1,A1,key2,A2,b,model))); } /** Add a ternary factor */ @@ -84,13 +80,13 @@ namespace gtsam { Index key2, const Matrix& A2, Index key3, const Matrix& A3, const Vector& b, const SharedDiagonal& model) { - push_back(sharedFactor(new GaussianFactor(key1,A1,key2,A2,key3,A3,b,model))); + push_back(sharedFactor(new JacobianFactor(key1,A1,key2,A2,key3,A3,b,model))); } /** Add an n-ary factor */ void add(const std::vector > &terms, const Vector &b, const SharedDiagonal& model) { - push_back(sharedFactor(new GaussianFactor(terms,b,model))); + push_back(sharedFactor(new JacobianFactor(terms,b,model))); } /** @@ -103,37 +99,9 @@ namespace gtsam { /** Permute the variables in the factors */ void permuteWithInverse(const Permutation& inversePermutation); - /** return A*x-b */ - Errors errors(const VectorValues& x) const; - - /** shared pointer version */ - boost::shared_ptr errors_(const VectorValues& x) const; - - /** unnormalized error */ - double error(const VectorValues& x) const; - - /** return A*x */ - Errors operator*(const VectorValues& x) const; - - /* In-place version e <- A*x that overwrites e. */ - void multiplyInPlace(const VectorValues& x, Errors& e) const; - - /* In-place version e <- A*x that takes an iterator. */ - void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const; - - /** x += alpha*A'*e */ - void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& x) const; - - /** - * Calculate Gradient of A^(A*x-b) for a given config - * @param x: VectorValues specifying where to calculate gradient - * @return gradient, as a VectorValues as well - */ - VectorValues gradient(const VectorValues& x) const; - /** Unnormalized probability. 
O(n) */ double probPrime(const VectorValues& c) const { - return exp(-0.5 * error(c)); + return exp(-0.5 * gaussianError(*this, c)); } /** @@ -151,17 +119,12 @@ namespace gtsam { */ void combine(const GaussianFactorGraph &lfg); - // matrix-vector operations - void residual(const VectorValues &x, VectorValues &r) const ; - void multiply(const VectorValues &x, VectorValues &r) const ; - void transposeMultiply(const VectorValues &r, VectorValues &x) const ; - // get b - void getb(VectorValues &b) const ; - VectorValues getb() const ; - - // allocate a vectorvalues of b's structure - VectorValues allocateVectorValuesb() const ; +// void getb(VectorValues &b) const ; +// VectorValues getb() const ; +// +// // allocate a vectorvalues of b's structure +// VectorValues allocateVectorValuesb() const ; }; diff --git a/gtsam/linear/GaussianJunctionTree.cpp b/gtsam/linear/GaussianJunctionTree.cpp index 3b904bbdc..61ec7fd26 100644 --- a/gtsam/linear/GaussianJunctionTree.cpp +++ b/gtsam/linear/GaussianJunctionTree.cpp @@ -65,22 +65,22 @@ namespace gtsam { /* ************************************************************************* */ VectorValues GaussianJunctionTree::optimize() const { - tic("GJT optimize 1: eliminate"); + tic(1, "GJT eliminate"); // eliminate from leaves to the root boost::shared_ptr rootClique(this->eliminate()); - toc("GJT optimize 1: eliminate"); + toc(1, "GJT eliminate"); // Allocate solution vector - tic("GJT optimize 2: allocate VectorValues"); + tic(2, "allocate VectorValues"); vector dims(rootClique->back()->key() + 1, 0); countDims(rootClique, dims); VectorValues result(dims); - toc("GJT optimize 2: allocate VectorValues"); + toc(2, "allocate VectorValues"); // back-substitution - tic("GJT optimize 3: back-substitute"); + tic(3, "back-substitute"); btreeBackSubstitute(rootClique, result); - toc("GJT optimize 3: back-substitute"); + toc(3, "back-substitute"); return result; } diff --git a/gtsam/linear/GaussianMultifrontalSolver.cpp b/gtsam/linear/GaussianMultifrontalSolver.cpp index 547cc3024..8b61c575c 100644 --- a/gtsam/linear/GaussianMultifrontalSolver.cpp +++ b/gtsam/linear/GaussianMultifrontalSolver.cpp @@ -68,7 +68,9 @@ GaussianFactor::shared_ptr GaussianMultifrontalSolver::marginalFactor(Index j) c /* ************************************************************************* */ std::pair GaussianMultifrontalSolver::marginalCovariance(Index j) const { - GaussianConditional::shared_ptr conditional = Base::marginalFactor(j)->eliminateFirst(); + FactorGraph fg; + fg.push_back(Base::marginalFactor(j)); + GaussianConditional::shared_ptr conditional = GaussianFactor::CombineAndEliminate(fg,1).first->front(); Matrix R = conditional->get_R(); return make_pair(conditional->get_d(), inverse(ublas::prod(ublas::trans(R), R))); } diff --git a/gtsam/linear/GaussianSequentialSolver.cpp b/gtsam/linear/GaussianSequentialSolver.cpp index 1d9543435..ed0c9bf90 100644 --- a/gtsam/linear/GaussianSequentialSolver.cpp +++ b/gtsam/linear/GaussianSequentialSolver.cpp @@ -81,7 +81,9 @@ GaussianFactor::shared_ptr GaussianSequentialSolver::marginalFactor(Index j) con } std::pair GaussianSequentialSolver::marginalCovariance(Index j) const { - GaussianConditional::shared_ptr conditional = Base::marginalFactor(j)->eliminateFirst(); + FactorGraph fg; + fg.push_back(Base::marginalFactor(j)); + GaussianConditional::shared_ptr conditional = GaussianFactor::CombineAndEliminate(fg,1).first->front(); Matrix R = conditional->get_R(); return make_pair(conditional->get_d(), inverse(ublas::prod(ublas::trans(R), 
R))); } diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp new file mode 100644 index 000000000..6f064f41c --- /dev/null +++ b/gtsam/linear/HessianFactor.cpp @@ -0,0 +1,407 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HessianFactor.cpp + * @brief + * @author Richard Roberts + * @created Dec 8, 2010 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; + +namespace ublas = boost::numeric::ublas; +using namespace boost::lambda; + +namespace gtsam { + + /* ************************************************************************* */ + HessianFactor::HessianFactor(const HessianFactor& gf) : + GaussianFactor(gf), info_(matrix_) { + info_.assignNoalias(gf.info_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor() : info_(matrix_) {} + + /* ************************************************************************* */ + HessianFactor::HessianFactor(const Vector& b_in) : info_(matrix_) { + JacobianFactor jf(b_in); + ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(Index i1, const Matrix& A1, + const Vector& b, const SharedDiagonal& model) : + GaussianFactor(i1), info_(matrix_) { + JacobianFactor jf(i1, A1, b, model); + ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2, + const Vector& b, const SharedDiagonal& model) : + GaussianFactor(i1,i2), info_(matrix_) { + JacobianFactor jf(i1, A1, i2, A2, b, model); + ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(Index i1, const Matrix& A1, Index i2, const Matrix& A2, + Index i3, const Matrix& A3, const Vector& b, const SharedDiagonal& model) : + GaussianFactor(i1,i2,i3), info_(matrix_) { + JacobianFactor jf(i1, A1, i2, A2, i3, A3, b, model); + ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(const std::vector > &terms, + const Vector &b, const SharedDiagonal& model) : info_(matrix_) { + JacobianFactor jf(terms, b, model); + keys_ = jf.keys_; + ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(const std::list > &terms, + const Vector &b, const 
SharedDiagonal& model) : info_(matrix_) { + JacobianFactor jf(terms, b, model); + keys_ = jf.keys_; + ublas::noalias(matrix_) = ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(const GaussianConditional& cg) : GaussianFactor(cg), info_(matrix_) { + JacobianFactor jf(cg); + ublas::noalias(ublas::symmetric_adaptor(matrix_)) = + ublas::prod(ublas::trans(jf.matrix_), jf.matrix_); + info_.copyStructureFrom(jf.Ab_); + } + + /* ************************************************************************* */ + HessianFactor::HessianFactor(const GaussianFactor& gf) : info_(matrix_) { + // Copy the variable indices + (GaussianFactor&)(*this) = gf; + // Copy the matrix data depending on what type of factor we're copying from + if(dynamic_cast(&gf)) { + const JacobianFactor& jf(static_cast(gf)); + JacobianFactor whitened(jf.whiten()); + matrix_ = ublas::prod(ublas::trans(whitened.matrix_), whitened.matrix_); + info_.copyStructureFrom(whitened.Ab_); + } else if(dynamic_cast(&gf)) { + const HessianFactor& hf(static_cast(gf)); + info_.assignNoalias(hf.info_); + } else + throw std::invalid_argument("In HessianFactor(const GaussianFactor& gf), gf is neither a JacobianFactor nor a HessianFactor"); + } + + /* ************************************************************************* */ + void HessianFactor::print(const std::string& s) const { + cout << s << "\n"; + cout << " keys: "; + for(const_iterator key=this->begin(); key!=this->end(); ++key) + cout << *key << "(" << this->getDim(key) << ") "; + cout << "\n"; + gtsam::print(ublas::symmetric_adaptor( + info_.range(0,info_.nBlocks(), 0,info_.nBlocks())), "Ab^T * Ab: "); + } + + /* ************************************************************************* */ + bool HessianFactor::equals(const GaussianFactor& lf, double tol) const { + if(!dynamic_cast(&lf)) + return false; + else { + MatrixColMajor thisMatrix = ublas::symmetric_adaptor(this->info_.full()); + thisMatrix(thisMatrix.size1()-1, thisMatrix.size2()-1) = 0.0; + MatrixColMajor rhsMatrix = ublas::symmetric_adaptor(static_cast(lf).info_.full()); + rhsMatrix(rhsMatrix.size1()-1, rhsMatrix.size2()-1) = 0.0; + return equal_with_abs_tol(thisMatrix, rhsMatrix, tol); + } + } + + /* ************************************************************************* */ + double HessianFactor::error(const VectorValues& c) const { + return ublas::inner_prod(c.vector(), ublas::prod(info_.range(0, this->size(), 0, this->size()), c.vector())) - + 2.0*ublas::inner_prod(c.vector(), info_.rangeColumn(0, this->size(), this->size(), 0)); + } + + /* ************************************************************************* */ + static FastMap findScatterAndDims(const FactorGraph& factors) { + + static const bool debug = false; + + // The "scatter" is a map from global variable indices to slot indices in the + // union of involved variables. We also include the dimensionality of the + // variable. + + Scatter scatter; + + // First do the set union. + BOOST_FOREACH(const HessianFactor::shared_ptr& factor, factors) { + for(HessianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) { + scatter.insert(make_pair(*variable, SlotEntry(0, factor->getDim(variable)))); + } + } + + // Next fill in the slot indices (we can only get these after doing the set + // union. 
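Editor's sketch of the two-pass scatter construction the comment above describes, using std::map in place of gtsam's FastMap; the insert() no-op on duplicate keys is what lets the first factor mentioning a variable fix its recorded dimension, after which slots are numbered in ascending key order.

#include <cstddef>
#include <map>
#include <utility>

// var index -> (slot, dim); pass 1 would populate this via insert(),
// pass 2 numbers the slots:
typedef std::map<std::size_t, std::pair<std::size_t, std::size_t> > MiniScatter;

void numberSlots(MiniScatter& scatter) {
  std::size_t slot = 0;
  for (MiniScatter::iterator it = scatter.begin(); it != scatter.end(); ++it)
    it->second.first = slot++;
}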
+ size_t slot = 0; + BOOST_FOREACH(Scatter::value_type& var_slot, scatter) { + var_slot.second.slot = (slot ++); + if(debug) + cout << "scatter[" << var_slot.first << "] = (slot " << var_slot.second.slot << ", dim " << var_slot.second.dimension << ")" << endl; + } + + return scatter; + } + +///* ************************************************************************* */ +//static MatrixColMajor formAbTAb(const FactorGraph& factors, const Scatter& scatter) { +// +// static const bool debug = false; +// +// tic("CombineAndEliminate: 3.1 varStarts"); +// // Determine scalar indices of each variable +// vector varStarts; +// varStarts.reserve(scatter.size() + 2); +// varStarts.push_back(0); +// BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) { +// varStarts.push_back(varStarts.back() + var_slot.second.dimension); +// } +// // This is for the r.h.s. vector +// varStarts.push_back(varStarts.back() + 1); +// toc("CombineAndEliminate: 3.1 varStarts"); +// +// // Allocate and zero matrix for Ab' * Ab +// MatrixColMajor ATA(ublas::zero_matrix(varStarts.back(), varStarts.back())); +// +// tic("CombineAndEliminate: 3.2 updates"); +// // Do blockwise low-rank updates to Ab' * Ab for each factor. Here, we +// // only update the upper triangle because this is all that Cholesky uses. +// BOOST_FOREACH(const HessianFactor::shared_ptr& factor, factors) { +// +// // Whiten the factor first so it has a unit diagonal noise model +// HessianFactor whitenedFactor(factor->whiten()); +// +// if(debug) whitenedFactor.print("whitened factor: "); +// +// for(HessianFactor::const_iterator var2 = whitenedFactor.begin(); var2 != whitenedFactor.end(); ++var2) { +// assert(scatter.find(*var2) != scatter.end()); +// size_t vj = scatter.find(*var2)->second.slot; +// for(HessianFactor::const_iterator var1 = whitenedFactor.begin(); var1 <= var2; ++var1) { +// assert(scatter.find(*var1) != scatter.end()); +// size_t vi = scatter.find(*var1)->second.slot; +// if(debug) cout << "Updating block " << vi << ", " << vj << endl; +// if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " << +// varStarts[vj] << ":" << varStarts[vj+1] << ") from A" << *var1 << "' * A" << *var2 << endl; +// ublas::noalias(ublas::project(ATA, +// ublas::range(varStarts[vi], varStarts[vi+1]), ublas::range(varStarts[vj], varStarts[vj+1]))) += +// ublas::prod(ublas::trans(whitenedFactor.getA(var1)), whitenedFactor.getA(var2)); +// } +// } +// +// // Update r.h.s. 
vector
+//    size_t vj = scatter.size();
+//    for(HessianFactor::const_iterator var1 = whitenedFactor.begin(); var1 < whitenedFactor.end(); ++var1) {
+//      assert(scatter.find(*var1) != scatter.end());
+//      size_t vi = scatter.find(*var1)->second.slot;
+//      if(debug) cout << "Updating block " << vi << ", " << vj << endl;
+//      if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " <<
+//          varStarts[vj] << ":" << varStarts[vj+1] << ") from A" << *var1 << "' * b" << endl;
+//      ublas::matrix_column<MatrixColMajor> col(ATA, varStarts[vj]);
+//      ublas::noalias(ublas::subrange(col, varStarts[vi], varStarts[vi+1])) +=
+//          ublas::prod(ublas::trans(whitenedFactor.getA(var1)), whitenedFactor.getb());
+//    }
+//
+//    size_t vi = scatter.size();
+//    if(debug) cout << "Updating block " << vi << ", " << vj << endl;
+//    if(debug) cout << "Updating (" << varStarts[vi] << ":" << varStarts[vi+1] << ", " <<
+//        varStarts[vj] << ":" << varStarts[vj+1] << ") from b" << "' * b" << endl;
+//    ublas::noalias(ATA(varStarts[vi], varStarts[vj])) += ublas::inner_prod(whitenedFactor.getb(), whitenedFactor.getb());
+//  }
+//  toc("CombineAndEliminate: 3.2 updates");
+//
+//  return ATA;
+//}
+
+void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatter) {
+
+  // This function updates 'combined' with the information in 'update'.
+  // 'scatter' maps variables in the update factor to slots in the combined
+  // factor.
+
+  static const bool debug = false;
+
+  // First build an array of slots
+  tic(1, "slots");
+  size_t slots[update.size()];
+  size_t slot = 0;
+  BOOST_FOREACH(Index j, update) {
+    slots[slot] = scatter.find(j)->second.slot;
+    ++ slot;
+  }
+  toc(1, "slots");
+
+  // Apply updates to the upper triangle
+  tic(2, "update");
+  assert(this->info_.nBlocks() - 1 == scatter.size());
+  for(size_t j2=0; j2<update.size()+1; ++j2) {
+    size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
+    for(size_t j1=0; j1<=j2; ++j1) {
+      size_t slot1 = (j1 == update.size()) ? this->info_.nBlocks()-1 : slots[j1];
+      if(debug)
+        cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
+      ublas::noalias(this->info_(slot1, slot2)) += update.info_(j1,j2);
+    }
+  }
+  toc(2, "update");
+}
+
+GaussianBayesNet::shared_ptr HessianFactor::splitEliminatedFactor(size_t nrFrontals, const vector<Index>& keys) {
+
+  static const bool debug = false;
+
+  // Extract conditionals
+  tic(1, "extract conditionals");
+  GaussianBayesNet::shared_ptr conditionals(new GaussianBayesNet());
+  typedef VerticalBlockView<InfoMatrix> BlockAb;
+  BlockAb Ab(matrix_, info_);
+  for(size_t j=0; j<nrFrontals; ++j) {
+    // Temporarily restrict the matrix view to the conditional blocks of the
+    // eliminated Ab matrix to create the GaussianConditional from it.
+    size_t varDim = Ab(0).size2();
+    Ab.rowEnd() = Ab.rowStart() + varDim;
+
+    // Zero the lower triangle of this block (relies on column-major storage).
+    tic(1, "zero");
+    {
+      BlockAb::Block remainingMatrix(Ab.range(0, Ab.nBlocks()));
+      if(remainingMatrix.size1() > 1)
+        for(size_t j = 0; j < remainingMatrix.size1() - 1; ++j)
+          memset(&remainingMatrix(j+1, j), 0, sizeof(remainingMatrix(0,0)) * (remainingMatrix.size1() - j - 1));
+      toc(1, "zero");
+    }
+
+    tic(2, "construct cond");
+    const ublas::scalar_vector<double> sigmas(varDim, 1.0);
+    conditionals->push_back(boost::make_shared<GaussianConditional>(keys.begin()+j, keys.end(), 1, Ab, sigmas));
+    toc(2, "construct cond");
+    if(debug) conditionals->back()->print("Extracted conditional: ");
+    Ab.rowStart() += varDim;
+    Ab.firstBlock() += 1;
+    if(debug) cout << "rowStart = " << Ab.rowStart() << ", rowEnd = " << Ab.rowEnd() << endl;
+  }
+  toc(1, "extract conditionals");
+
+  // Take lower-right block of Ab_ to get the new factor
+  tic(2, "remaining factor");
+  info_.blockStart() = nrFrontals;
+  // Assign the keys
+  keys_.assign(keys.begin() + nrFrontals, keys.end());
+  toc(2, "remaining factor");
+
+  return conditionals;
+}
+
+/* ************************************************************************* */
+pair<GaussianBayesNet::shared_ptr, GaussianFactor::shared_ptr> HessianFactor::CombineAndEliminate(
+    const FactorGraph<HessianFactor>& factors, size_t nrFrontals) {
+
+  static const bool debug = false;
+
+  // Find the scatter and variable dimensions
+  tic(1, "find scatter");
+  Scatter scatter(findScatterAndDims(factors));
+  toc(1, "find scatter");
+
+  // Pull out keys and dimensions
+  tic(2, "keys");
+  vector<Index> keys(scatter.size());
+  vector<size_t> dimensions(scatter.size() + 1);
+  BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) {
+    keys[var_slot.second.slot] = var_slot.first;
+    dimensions[var_slot.second.slot] = var_slot.second.dimension;
+  }
+  // This is for the r.h.s. vector
+  dimensions.back() = 1;
+  toc(2, "keys");
+
+  // Form Ab' * Ab
+  tic(3, "combine");
+  tic(1, "allocate");
+  HessianFactor::shared_ptr combinedFactor(new HessianFactor());
+  combinedFactor->info_.resize(dimensions.begin(), dimensions.end(), false);
+  toc(1, "allocate");
+  tic(2, "zero");
+  ublas::noalias(combinedFactor->matrix_) = ublas::zero_matrix<double>(combinedFactor->matrix_.size1(), combinedFactor->matrix_.size2());
+  toc(2, "zero");
+  tic(3, "update");
+  BOOST_FOREACH(const HessianFactor::shared_ptr& factor, factors) {
+    combinedFactor->updateATA(*factor, scatter);
+  }
+  toc(3, "update");
+  if(debug) gtsam::print(combinedFactor->matrix_, "Ab' * Ab: ");
+  toc(3, "combine");
+
+  // Do Cholesky, note that after this, the lower triangle still contains
+  // some untouched non-zeros that should be zero.  We zero them while
+  // extracting submatrices next.
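Editor's aside on what the partial Cholesky below leaves behind, assuming choleskyPartial factors only the frontal columns: for an information matrix [A B; B' C] it produces [R S; * L] with R'R = A, R'S = B, and L = C - S'S (the Schur complement), where '*' is stale sub-diagonal data that the split step zeroes. A tiny numeric check:

#include <cmath>
#include <cstdio>

// Partial Cholesky of [[4,2],[2,3]] over the first column: R = 2, S = 1,
// and the Schur complement left on the separator is L = 3 - 1*1 = 2.
int main() {
  double A = 4, B = 2, C = 3;
  double R = std::sqrt(A);  // 2
  double S = B / R;         // 1
  double L = C - S * S;     // 2, the information remaining on the separator
  std::printf("R=%g S=%g Schur=%g\n", R, S, L);
  return 0;
}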
+ tic(4, "partial Cholesky"); + choleskyPartial(combinedFactor->matrix_, combinedFactor->info_.offset(nrFrontals)); + if(debug) gtsam::print(combinedFactor->matrix_, "chol(Ab' * Ab): "); + toc(4, "partial Cholesky"); + + // Extract conditionals and fill in details of the remaining factor + tic(5, "split"); + GaussianBayesNet::shared_ptr conditionals(combinedFactor->splitEliminatedFactor(nrFrontals, keys)); + if(debug) { + conditionals->print("Extracted conditionals: "); + combinedFactor->print("Eliminated factor (L piece): "); + } + toc(5, "split"); + + return make_pair(conditionals, combinedFactor); +} + +} diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h new file mode 100644 index 000000000..3ebb1dfbd --- /dev/null +++ b/gtsam/linear/HessianFactor.h @@ -0,0 +1,133 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file HessianFactor.h + * @brief + * @author Richard Roberts + * @created Dec 8, 2010 + */ + +#pragma once + +#include +#include + +// Forward declarations for friend unit tests +class ConversionConstructorHessianFactorTest; + +namespace gtsam { + + // Forward declarations + class JacobianFactor; + class SharedDiagonal; + + // Definition of Scatter + struct SlotEntry { + size_t slot; + size_t dimension; + SlotEntry(size_t _slot, size_t _dimension) : slot(_slot), dimension(_dimension) {} + }; + typedef FastMap Scatter; + + class HessianFactor : public GaussianFactor { + protected: + typedef MatrixColMajor InfoMatrix; + typedef SymmetricBlockView BlockInfo; + + InfoMatrix matrix_; // The full information matrix [A b]^T * [A b] + BlockInfo info_; // The block view of the full information matrix. 
+ + GaussianBayesNet::shared_ptr splitEliminatedFactor(size_t nrFrontals, const std::vector& keys); + void updateATA(const HessianFactor& update, const Scatter& scatter); + + public: + + typedef boost::shared_ptr shared_ptr; + typedef BlockInfo::Block Block; + typedef BlockInfo::constBlock constBlock; + + /** Copy constructor */ + HessianFactor(const HessianFactor& gf); + + /** default constructor for I/O */ + HessianFactor(); + + /** Construct Null factor */ + HessianFactor(const Vector& b_in); + + /** Construct unary factor */ + HessianFactor(Index i1, const Matrix& A1, + const Vector& b, const SharedDiagonal& model); + + /** Construct binary factor */ + HessianFactor(Index i1, const Matrix& A1, + Index i2, const Matrix& A2, + const Vector& b, const SharedDiagonal& model); + + /** Construct ternary factor */ + HessianFactor(Index i1, const Matrix& A1, Index i2, + const Matrix& A2, Index i3, const Matrix& A3, + const Vector& b, const SharedDiagonal& model); + + /** Construct an n-ary factor */ + HessianFactor(const std::vector > &terms, + const Vector &b, const SharedDiagonal& model); + + HessianFactor(const std::list > &terms, + const Vector &b, const SharedDiagonal& model); + + /** Construct from Conditional Gaussian */ + HessianFactor(const GaussianConditional& cg); + + /** Convert from a JacobianFactor or HessianFactor (computes A^T * A) */ + HessianFactor(const GaussianFactor& factor); + + virtual ~HessianFactor() {} + + // Implementing Testable interface + virtual void print(const std::string& s = "") const; + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const; + + virtual double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */ + + /** Return the dimension of the variable pointed to by the given key iterator + * todo: Remove this in favor of keeping track of dimensions with variables? + */ + virtual size_t getDim(const_iterator variable) const { return info_(variable-this->begin(), 0).size1(); } + + /** Return a view of a block of the information matrix */ + constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); } + + /** + * Permutes the GaussianFactor, but for efficiency requires the permutation + * to already be inverted. This acts just as a change-of-name for each + * variable. The order of the variables within the factor is not changed. + */ + virtual void permuteWithInverse(const Permutation& inversePermutation) { + FactorBase::permuteWithInverse(inversePermutation); } + + /** + * Combine and eliminate several factors. + */ + static std::pair CombineAndEliminate( + const FactorGraph& factors, size_t nrFrontals=1); + + // Friend unit test classes + friend class ::ConversionConstructorHessianFactorTest; + + // Friend JacobianFactor for conversion + friend class JacobianFactor; + + }; + +} + diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp new file mode 100644 index 000000000..57a9e2dd8 --- /dev/null +++ b/gtsam/linear/JacobianFactor.cpp @@ -0,0 +1,733 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file JacobianFactor.cpp + * @brief + * @author Richard Roberts + * @created Dec 8, 2010 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +using namespace std; +namespace ublas = boost::numeric::ublas; +using namespace boost::lambda; + +namespace gtsam { + + /* ************************************************************************* */ + inline void JacobianFactor::assertInvariants() const { + #ifndef NDEBUG + IndexFactor::assertInvariants(); + assert((keys_.size() == 0 && Ab_.size1() == 0 && Ab_.nBlocks() == 0) || keys_.size()+1 == Ab_.nBlocks()); + assert(firstNonzeroBlocks_.size() == Ab_.size1()); + for(size_t i=0; i > &terms, + const Vector &b, const SharedDiagonal& model) : + model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { + keys_.resize(terms.size()); + size_t dims[terms.size()+1]; + for(size_t j=0; j > &terms, + const Vector &b, const SharedDiagonal& model) : + model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) { + keys_.resize(terms.size()); + size_t dims[terms.size()+1]; + size_t j=0; + for(std::list >::const_iterator term=terms.begin(); term!=terms.end(); ++term) { + keys_[j] = term->first; + dims[j] = term->second.size2(); + ++ j; + } + dims[j] = 1; + firstNonzeroBlocks_.resize(b.size(), 0); + Ab_.copyStructureFrom(BlockAb(matrix_, dims, dims+terms.size()+1, b.size())); + j = 0; + for(std::list >::const_iterator term=terms.begin(); term!=terms.end(); ++term) { + Ab_(j) = term->second; + ++ j; + } + getb() = b; + assertInvariants(); + } + + /* ************************************************************************* */ + JacobianFactor::JacobianFactor(const GaussianConditional& cg) : GaussianFactor(cg), model_(noiseModel::Diagonal::Sigmas(cg.get_sigmas(), true)), Ab_(matrix_) { + Ab_.assignNoalias(cg.rsd_); + // todo SL: make firstNonzeroCols triangular? 
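+    // All rows are conservatively marked below as starting at block 0; since
+    // the conditional's R is upper-triangular, later rows actually start at
+    // later blocks, which the todo above notes could be exploited.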
+ firstNonzeroBlocks_.resize(cg.get_d().size(), 0); // set sigmas from precisions + assertInvariants(); + } + + /* ************************************************************************* */ + JacobianFactor::JacobianFactor(const HessianFactor& factor) : Ab_(matrix_) { + keys_ = factor.keys_; + Ab_.assignNoalias(factor.info_); + size_t maxrank = choleskyCareful(matrix_); + Ab_.rowEnd() = maxrank; + model_ = noiseModel::Unit::Create(maxrank); + + size_t varpos = 0; + firstNonzeroBlocks_.resize(this->size1()); + for(size_t row=0; rowsize1(); ++row) { + while(varpos < this->keys_.size() && Ab_.offset(varpos+1) <= row) + ++ varpos; + firstNonzeroBlocks_[row] = varpos; + } + assertInvariants(); + } + + /* ************************************************************************* */ + void JacobianFactor::print(const string& s) const { + cout << s << "\n"; + if (empty()) { + cout << " empty, keys: "; + BOOST_FOREACH(const Index key, keys_) { cout << key << " "; } + cout << endl; + } else { + for(const_iterator key=begin(); key!=end(); ++key) + gtsam::print(getA(key), (boost::format("A[%1%]=\n")%*key).str()); + gtsam::print(getb(),"b="); + model_->print("model"); + } + } + + /* ************************************************************************* */ + // Check if two linear factors are equal + bool JacobianFactor::equals(const GaussianFactor& f_, double tol) const { + if(!dynamic_cast(&f_)) + return false; + else { + const JacobianFactor& f(static_cast(f_)); + if (empty()) return (f.empty()); + if(keys_!=f.keys_ /*|| !model_->equals(lf->model_, tol)*/) + return false; + + assert(Ab_.size1() == f.Ab_.size1() && Ab_.size2() == f.Ab_.size2()); + + constABlock Ab1(Ab_.range(0, Ab_.nBlocks())); + constABlock Ab2(f.Ab_.range(0, f.Ab_.nBlocks())); + for(size_t row=0; row, boost::fast_pool_allocator > > SourceSlots; + SourceSlots sourceSlots; + for(size_t j=0; j dimensions(keys_.size() + 1); + size_t j = 0; + BOOST_FOREACH(const SourceSlots::value_type& sourceSlot, sourceSlots) { + dimensions[j++] = Ab_(sourceSlot.second).size2(); + } + assert(j == keys_.size()); + dimensions.back() = 1; + + // Copy the variables and matrix into the new order + vector oldKeys(keys_.size()); + keys_.swap(oldKeys); + AbMatrix oldMatrix; + BlockAb oldAb(oldMatrix, dimensions.begin(), dimensions.end(), Ab_.size1()); + Ab_.swap(oldAb); + j = 0; + BOOST_FOREACH(const SourceSlots::value_type& sourceSlot, sourceSlots) { + keys_[j] = sourceSlot.first; + ublas::noalias(Ab_(j++)) = oldAb(sourceSlot.second); + } + ublas::noalias(Ab_(j)) = oldAb(j); + + // Since we're permuting the variables, ensure that entire rows from this + // factor are copied when Combine is called + BOOST_FOREACH(size_t& varpos, firstNonzeroBlocks_) { varpos = 0; } + assertInvariants(); + } + + /* ************************************************************************* */ + Vector JacobianFactor::unweighted_error(const VectorValues& c) const { + Vector e = -getb(); + if (empty()) return e; + for(size_t pos=0; poswhiten(-getb()); + return model_->whiten(unweighted_error(c)); + } + + /* ************************************************************************* */ + double JacobianFactor::error(const VectorValues& c) const { + if (empty()) return 0; + Vector weighted = error_vector(c); + return 0.5 * inner_prod(weighted,weighted); + } + + + /* ************************************************************************* */ + Vector JacobianFactor::operator*(const VectorValues& x) const { + Vector Ax = zero(Ab_.size1()); + if (empty()) return Ax; + + // Just 
iterate over all A matrices and multiply in correct config part + for(size_t pos=0; poswhiten(Ax); + } + + + /* ************************************************************************* */ + void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e, + VectorValues& x) const { + Vector E = alpha * model_->whiten(e); + // Just iterate over all A matrices and insert Ai^e into VectorValues + for(size_t pos=0; pos JacobianFactor::matrix(bool weight) const { + Matrix A(Ab_.range(0, keys_.size())); + Vector b(getb()); + // divide in sigma so error is indeed 0.5*|Ax-b| + if (weight) model_->WhitenSystem(A,b); + return make_pair(A, b); + } + + /* ************************************************************************* */ + Matrix JacobianFactor::matrix_augmented(bool weight) const { + if (weight) { Matrix Ab(Ab_.range(0,Ab_.nBlocks())); model_->WhitenInPlace(Ab); return Ab; } + else return Ab_.range(0, Ab_.nBlocks()); + } + + /* ************************************************************************* */ + boost::tuple, list, list > + JacobianFactor::sparse(const map& columnIndices) const { + + // declare return values + list I,J; + list S; + + // iterate over all matrices in the factor + for(size_t pos=0; possigma(i); + for (size_t j = 0; j < A.size2(); j++) + if (A(i, j) != 0.0) { + I.push_back(i + 1); + J.push_back(j + column_start); + S.push_back(A(i, j) / sigma_i); + } + } + } + + // return the result + return boost::tuple, list, list >(I,J,S); + } + + /* ************************************************************************* */ + JacobianFactor JacobianFactor::whiten() const { + JacobianFactor result(*this); + result.model_->WhitenInPlace(result.matrix_); + result.model_ = noiseModel::Unit::Create(result.model_->dim()); + return result; + } + + /* ************************************************************************* */ + GaussianConditional::shared_ptr JacobianFactor::eliminateFirst() { + return this->eliminate(1)->front(); + } + + /* ************************************************************************* */ + GaussianBayesNet::shared_ptr JacobianFactor::eliminate(size_t nrFrontals) { + + assert(Ab_.rowStart() == 0 && Ab_.rowEnd() == matrix_.size1() && Ab_.firstBlock() == 0); + assert(keys_.size() >= nrFrontals); + assertInvariants(); + + static const bool debug = false; + + if(debug) cout << "Eliminating " << nrFrontals << " frontal variables" << endl; + if(debug) this->print("Eliminating JacobianFactor: "); + + tic(1, "stairs"); + // Translate the left-most nonzero column indices into top-most zero row indices + vector firstZeroRows(Ab_.size2()); + { + size_t lastNonzeroRow = 0; + vector::iterator firstZeroRowsIt = firstZeroRows.begin(); + for(size_t var=0; varsize1() && firstNonzeroBlocks_[lastNonzeroRow] <= var) + ++ lastNonzeroRow; + fill(firstZeroRowsIt, firstZeroRowsIt+Ab_(var).size2(), lastNonzeroRow); + firstZeroRowsIt += Ab_(var).size2(); + } + assert(firstZeroRowsIt+1 == firstZeroRows.end()); + *firstZeroRowsIt = this->size1(); + } + toc(1, "stairs"); + + #ifndef NDEBUG + for(size_t col=0; colQRColumnWise(matrix_, firstZeroRows); + toc(2, "QR"); + + // Zero the lower-left triangle. todo: not all of these entries actually + // need to be zeroed if we are careful to start copying rows after the last + // structural zero. 
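+    // (After the in-place Householder factorization the entries below the
+    // diagonal are generally not zero -- in the LAPACK convention they hold
+    // reflector coefficients -- so they are cleared here before [R d] is
+    // read off the upper triangle.)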
+ if(matrix_.size1() > 0) { + for(size_t j=0; jdim(); ++i) + matrix_(i,j) = 0.0; + } + + if(debug) gtsam::print(matrix_, "QR result: "); + if(debug) noiseModel->print("QR result noise model: "); + + // Check for singular factor + if(noiseModel->dim() < frontalDim) { + throw domain_error((boost::format( + "JacobianFactor is singular in variable %1%, discovered while attempting\n" + "to eliminate this variable.") % keys_.front()).str()); + } + + // Extract conditionals + tic(3, "cond Rd"); + GaussianBayesNet::shared_ptr conditionals(new GaussianBayesNet()); + for(size_t j=0; j sigmas(noiseModel->sigmas(), ublas::range(Ab_.rowStart(), Ab_.rowEnd())); + conditionals->push_back(boost::make_shared(keys_.begin()+j, keys_.end(), 1, Ab_, sigmas)); + if(debug) conditionals->back()->print("Extracted conditional: "); + Ab_.rowStart() += varDim; + Ab_.firstBlock() += 1; + } + toc(3, "cond Rd"); + + if(debug) conditionals->print("Extracted conditionals: "); + + tic(4, "remaining factor"); + // Take lower-right block of Ab to get the new factor + Ab_.rowEnd() = noiseModel->dim(); + keys_.assign(keys_.begin() + nrFrontals, keys_.end()); + // Set sigmas with the right model + if (noiseModel->isConstrained()) + model_ = noiseModel::Constrained::MixedSigmas(sub(noiseModel->sigmas(), frontalDim, noiseModel->dim())); + else + model_ = noiseModel::Diagonal::Sigmas(sub(noiseModel->sigmas(), frontalDim, noiseModel->dim())); + if(debug) this->print("Eliminated factor: "); + assert(Ab_.size1() <= Ab_.size2()-1); + toc(4, "remaining factor"); + + // todo SL: deal with "dead" pivot columns!!! + tic(5, "rowstarts"); + size_t varpos = 0; + firstNonzeroBlocks_.resize(this->size1()); + for(size_t row=0; row, size_t, size_t> countDims(const std::vector& factors, const VariableSlots& variableSlots) { + #ifndef NDEBUG + vector varDims(variableSlots.size(), numeric_limits::max()); + #else + vector varDims(variableSlots.size()); + #endif + size_t m = 0; + size_t n = 0; + { + Index jointVarpos = 0; + BOOST_FOREACH(const VariableSlots::value_type& slots, variableSlots) { + + assert(slots.second.size() == factors.size()); + + Index sourceFactorI = 0; + BOOST_FOREACH(const Index sourceVarpos, slots.second) { + if(sourceVarpos < numeric_limits::max()) { + const JacobianFactor& sourceFactor = *factors[sourceFactorI]; + size_t vardim = sourceFactor.getDim(sourceFactor.begin() + sourceVarpos); + #ifndef NDEBUG + if(varDims[jointVarpos] == numeric_limits::max()) { + varDims[jointVarpos] = vardim; + n += vardim; + } else + assert(varDims[jointVarpos] == vardim); + #else + varDims[jointVarpos] = vardim; + n += vardim; + break; + #endif + } + ++ sourceFactorI; + } + ++ jointVarpos; + } + BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, factors) { + m += factor->size1(); + } + } + return boost::make_tuple(varDims, m, n); + } + + /* ************************************************************************* */ + JacobianFactor::shared_ptr JacobianFactor::Combine(const FactorGraph& factors, const VariableSlots& variableSlots) { + + static const bool debug = false; + + if(debug) factors.print("Combining factors: "); + + if(debug) variableSlots.print(); + + // Determine dimensions + tic(1, "countDims"); + vector varDims; + size_t m; + size_t n; + boost::tie(varDims, m, n) = countDims(factors, variableSlots); + if(debug) { + cout << "Dims: " << m << " x " << n << "\n"; + BOOST_FOREACH(const size_t dim, varDims) { cout << dim << " "; } + cout << endl; + } + toc(1, "countDims"); + + // Sort rows + tic(2, "sort rows"); + vector<_RowSource> 
rowSources; rowSources.reserve(m); + bool anyConstrained = false; + for(size_t sourceFactorI = 0; sourceFactorI < factors.size(); ++sourceFactorI) { + const JacobianFactor& sourceFactor(*factors[sourceFactorI]); + sourceFactor.assertInvariants(); + for(size_t sourceFactorRow = 0; sourceFactorRow < sourceFactor.size1(); ++sourceFactorRow) { + Index firstNonzeroVar; + if(sourceFactor.firstNonzeroBlocks_[sourceFactorRow] < sourceFactor.size()) + firstNonzeroVar = sourceFactor.keys_[sourceFactor.firstNonzeroBlocks_[sourceFactorRow]]; + else if(sourceFactor.firstNonzeroBlocks_[sourceFactorRow] == sourceFactor.size()) + firstNonzeroVar = sourceFactor.back() + 1; + else + assert(false); + rowSources.push_back(_RowSource(firstNonzeroVar, sourceFactorI, sourceFactorRow)); + } + if(sourceFactor.model_->isConstrained()) anyConstrained = true; + } + assert(rowSources.size() == m); + std::sort(rowSources.begin(), rowSources.end()); + toc(2, "sort rows"); + + // Allocate new factor + tic(3, "allocate"); + shared_ptr combined(new JacobianFactor()); + combined->keys_.resize(variableSlots.size()); + std::transform(variableSlots.begin(), variableSlots.end(), combined->keys_.begin(), bind(&VariableSlots::const_iterator::value_type::first, boost::lambda::_1)); + varDims.push_back(1); + combined->Ab_.copyStructureFrom(BlockAb(combined->matrix_, varDims.begin(), varDims.end(), m)); + combined->firstNonzeroBlocks_.resize(m); + Vector sigmas(m); + toc(3, "allocate"); + + // Copy rows + tic(4, "copy rows"); + Index combinedSlot = 0; + BOOST_FOREACH(const VariableSlots::value_type& varslot, variableSlots) { + for(size_t row = 0; row < m; ++row) { + const Index sourceSlot = varslot.second[rowSources[row].factorI]; + ABlock combinedBlock(combined->Ab_(combinedSlot)); + if(sourceSlot != numeric_limits::max()) { + const JacobianFactor& source(*factors[rowSources[row].factorI]); + const size_t sourceRow = rowSources[row].factorRowI; + if(source.firstNonzeroBlocks_[sourceRow] <= sourceSlot) { + const constABlock sourceBlock(source.Ab_(sourceSlot)); + ublas::noalias(ublas::row(combinedBlock, row)) = ublas::row(sourceBlock, sourceRow); + } else + ublas::noalias(ublas::row(combinedBlock, row)) = ublas::zero_vector(combinedBlock.size2()); + } else + ublas::noalias(ublas::row(combinedBlock, row)) = ublas::zero_vector(combinedBlock.size2()); + } + ++ combinedSlot; + } + toc(4, "copy rows"); + + // Copy rhs (b), sigma, and firstNonzeroBlocks + tic(5, "copy vectors"); + Index firstNonzeroSlot = 0; + for(size_t row = 0; row < m; ++row) { + const JacobianFactor& source(*factors[rowSources[row].factorI]); + const size_t sourceRow = rowSources[row].factorRowI; + combined->getb()(row) = source.getb()(sourceRow); + sigmas(row) = source.get_model()->sigmas()(sourceRow); + while(firstNonzeroSlot < variableSlots.size() && rowSources[row].firstNonzeroVar > combined->keys_[firstNonzeroSlot]) + ++ firstNonzeroSlot; + combined->firstNonzeroBlocks_[row] = firstNonzeroSlot; + } + toc(5, "copy vectors"); + + // Create noise model from sigmas + tic(6, "noise model"); + if(anyConstrained) combined->model_ = noiseModel::Constrained::MixedSigmas(sigmas); + else combined->model_ = noiseModel::Diagonal::Sigmas(sigmas); + toc(6, "noise model"); + + combined->assertInvariants(); + + return combined; + } + + /* ************************************************************************* */ + pair JacobianFactor::CombineAndEliminate( + const FactorGraph& factors, size_t nrFrontals) { + tic(1, "Combine"); + shared_ptr jointFactor(Combine(factors, 
VariableSlots(factors))); + toc(1, "Combine"); + tic(2, "eliminate"); + GaussianBayesNet::shared_ptr gbn(jointFactor->eliminate(nrFrontals)); + toc(2, "eliminate"); + return make_pair(gbn, jointFactor); + } + + + /* ************************************************************************* */ + Errors operator*(const FactorGraph& fg, const VectorValues& x) { + Errors e; + BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) { + e.push_back((*Ai)*x); + } + return e; + } + + /* ************************************************************************* */ + void multiplyInPlace(const FactorGraph& fg, const VectorValues& x, Errors& e) { + multiplyInPlace(fg,x,e.begin()); + } + + /* ************************************************************************* */ + void multiplyInPlace(const FactorGraph& fg, const VectorValues& x, const Errors::iterator& e) { + Errors::iterator ei = e; + BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) { + *ei = (*Ai)*x; + ei++; + } + } + + + /* ************************************************************************* */ + // x += alpha*A'*e + void transposeMultiplyAdd(const FactorGraph& fg, double alpha, const Errors& e, VectorValues& x) { + // For each factor add the gradient contribution + Errors::const_iterator ei = e.begin(); + BOOST_FOREACH(const JacobianFactor::shared_ptr& Ai, fg) { + Ai->transposeMultiplyAdd(alpha,*(ei++),x); + } + } + + /* ************************************************************************* */ + VectorValues gradient(const FactorGraph& fg, const VectorValues& x) { + // It is crucial for performance to make a zero-valued clone of x + VectorValues g = VectorValues::zero(x); + Errors e; + BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) { + e.push_back(factor->error_vector(x)); + } + transposeMultiplyAdd(fg, 1.0, e, g); + return g; + } + + /* ************************************************************************* */ + void residual(const FactorGraph& fg, const VectorValues &x, VectorValues &r) { + Index i = 0 ; + BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) { + r[i] = factor->getb(); + i++; + } + VectorValues Ax = VectorValues::SameStructure(r); + multiply(fg,x,Ax); + axpy(-1.0,Ax,r); + } + + /* ************************************************************************* */ + void multiply(const FactorGraph& fg, const VectorValues &x, VectorValues &r) { + r.makeZero(); + Index i = 0; + BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) { + for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) { + r[i] += prod(factor->getA(j), x[*j]); + } + ++i; + } + } + + /* ************************************************************************* */ + void transposeMultiply(const FactorGraph& fg, const VectorValues &r, VectorValues &x) { + x.makeZero(); + Index i = 0; + BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) { + for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) { + x[*j] += prod(trans(factor->getA(j)), r[i]); + } + ++i; + } + } + +} diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h new file mode 100644 index 000000000..38cd88caf --- /dev/null +++ b/gtsam/linear/JacobianFactor.h @@ -0,0 +1,238 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file JacobianFactor.h + * @brief + * @author Richard Roberts + * @created Dec 8, 2010 + */ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +// Forward declarations of friend unit tests +class Combine2GaussianFactorTest; +class eliminateFrontalsGaussianFactorTest; + +namespace gtsam { + + // Forward declarations + class HessianFactor; + class VariableSlots; + + class JacobianFactor : public GaussianFactor { + + protected: + typedef MatrixColMajor AbMatrix; + typedef VerticalBlockView BlockAb; + + SharedDiagonal model_; // Gaussian noise model with diagonal covariance matrix + std::vector firstNonzeroBlocks_; + AbMatrix matrix_; // the full matrix corresponding to the factor + BlockAb Ab_; // the block view of the full matrix + + public: + typedef boost::shared_ptr shared_ptr; + typedef BlockAb::Block ABlock; + typedef BlockAb::constBlock constABlock; + typedef BlockAb::Column BVector; + typedef BlockAb::constColumn constBVector; + + protected: + void assertInvariants() const; + static JacobianFactor::shared_ptr Combine(const FactorGraph& factors, const VariableSlots& variableSlots); + + public: + + /** Copy constructor */ + JacobianFactor(const JacobianFactor& gf); + + /** default constructor for I/O */ + JacobianFactor(); + + /** Construct Null factor */ + JacobianFactor(const Vector& b_in); + + /** Construct unary factor */ + JacobianFactor(Index i1, const Matrix& A1, + const Vector& b, const SharedDiagonal& model); + + /** Construct binary factor */ + JacobianFactor(Index i1, const Matrix& A1, + Index i2, const Matrix& A2, + const Vector& b, const SharedDiagonal& model); + + /** Construct ternary factor */ + JacobianFactor(Index i1, const Matrix& A1, Index i2, + const Matrix& A2, Index i3, const Matrix& A3, + const Vector& b, const SharedDiagonal& model); + + /** Construct an n-ary factor */ + JacobianFactor(const std::vector > &terms, + const Vector &b, const SharedDiagonal& model); + + JacobianFactor(const std::list > &terms, + const Vector &b, const SharedDiagonal& model); + + /** Construct from Conditional Gaussian */ + JacobianFactor(const GaussianConditional& cg); + + /** Convert from a HessianFactor (does Cholesky) */ + JacobianFactor(const HessianFactor& factor); + + virtual ~JacobianFactor() {} + + // Implementing Testable interface + virtual void print(const std::string& s = "") const; + virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const; + + Vector unweighted_error(const VectorValues& c) const; /** (A*x-b) */ + Vector error_vector(const VectorValues& c) const; /** (A*x-b)/sigma */ + virtual double error(const VectorValues& c) const; /** 0.5*(A*x-b)'*D*(A*x-b) */ + + /** Check if the factor contains no information, i.e. zero rows. This does + * not necessarily mean that the factor involves no variables (to check for + * involving no variables use keys().empty()). + */ + bool empty() const { return Ab_.size1() == 0;} + + /** Return the dimension of the variable pointed to by the given key iterator + * todo: Remove this in favor of keeping track of dimensions with variables? + */ + virtual size_t getDim(const_iterator variable) const { return Ab_(variable - keys_.begin()).size2(); } + + /** + * Permutes the GaussianFactor, but for efficiency requires the permutation + * to already be inverted. 
This acts just as a change-of-name for each
+   * variable. The order of the variables within the factor is not changed.
+   */
+    virtual void permuteWithInverse(const Permutation& inversePermutation);
+
+    /** Return the number of rows in the corresponding linear system */
+    size_t size1() const { return Ab_.size1(); }
+
+    /** Alias for size1() */
+    size_t numberOfRows() const { return size1(); }
+
+    /** Return the number of columns in the corresponding linear system */
+    size_t size2() const { return Ab_.size2(); }
+
+    /** Get a const reference to the noise model */
+    const SharedDiagonal& get_model() const { return model_; }
+
+    /** Get a reference to the noise model (non-const version) */
+    SharedDiagonal& get_model() { return model_; }
+
+    /** Get a view of the r.h.s. vector b */
+    constBVector getb() const { return Ab_.column(size(), 0); }
+
+    /** Get a view of the A matrix for the variable pointed to by the given key iterator */
+    constABlock getA(const_iterator variable) const { return Ab_(variable - keys_.begin()); }
+
+    BVector getb() { return Ab_.column(size(), 0); }
+
+    ABlock getA(iterator variable) { return Ab_(variable - keys_.begin()); }
+
+    /** Return A*x */
+    Vector operator*(const VectorValues& x) const;
+
+    /** x += A'*e */
+    void transposeMultiplyAdd(double alpha, const Vector& e, VectorValues& x) const;
+
+    /**
+     * Return the dense matrix A and r.h.s. vector b associated with the factor
+     * @param weight set to true to bake the whitening weights into A and b
+     */
+    std::pair<Matrix, Vector> matrix(bool weight = true) const;
+
+    /**
+     * Return the dense matrix associated with the factor
+     * The returned system is an augmented matrix: [A b]
+     * @param weight set to true to use whitening to bake in the weights
+     */
+    Matrix matrix_augmented(bool weight = true) const;
+
+    /**
+     * Return vectors i, j, and s to generate an m-by-n sparse matrix
+     * such that S(i(k),j(k)) = s(k), which can be given to MATLAB's sparse.
+     * As above, the standard deviations are baked into A and b
+     * @param columnIndices first column index for each variable
+     */
+    boost::tuple<std::list<int>, std::list<int>, std::list<double> >
+        sparse(const std::map<Index, size_t>& columnIndices) const;
+
+    /**
+     * Return a whitened version of the factor, i.e. with unit diagonal noise
+     * model. */
+    JacobianFactor whiten() const;
+
+    /**
+     * Combine and eliminate several factors.
+     */
+    static std::pair<GaussianBayesNet::shared_ptr, shared_ptr> CombineAndEliminate(
+        const FactorGraph<JacobianFactor>& factors, size_t nrFrontals=1);
+
+    GaussianConditional::shared_ptr eliminateFirst();
+
+    GaussianBayesNet::shared_ptr eliminate(size_t nrFrontals = 1);
+
+    // Friend HessianFactor to facilitate conversion constructors
+    friend class HessianFactor;
+
+    // Friend unit tests (see also forward declarations above)
+    friend class ::Combine2GaussianFactorTest;
+    friend class ::eliminateFrontalsGaussianFactorTest;
+  };
+
+
+  /** return A*x */
+  Errors operator*(const FactorGraph<JacobianFactor>& fg, const VectorValues& x);
+
+  /* In-place version e <- A*x that overwrites e. */
+  void multiplyInPlace(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, Errors& e);
+
+  /* In-place version e <- A*x that takes an iterator. */
+  void multiplyInPlace(const FactorGraph<JacobianFactor>& fg, const VectorValues& x, const Errors::iterator& e);
+
+  /** x += alpha*A'*e */
+  void transposeMultiplyAdd(const FactorGraph<JacobianFactor>& fg, double alpha, const Errors& e, VectorValues& x);
+
+  /**
+   * Calculate the gradient A'*(A*x-b) of the squared error 0.5*|A*x-b|^2 for a given config
+   * @param x: VectorValues specifying where to calculate the gradient
+   * @return gradient, as a VectorValues as well
+   */
+  VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x);
+
+  // matrix-vector operations
+  void residual(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r);
+  void multiply(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r);
+  void transposeMultiply(const FactorGraph<JacobianFactor>& fg, const VectorValues &r, VectorValues &x);
+
+}
+
diff --git a/gtsam/linear/Makefile.am b/gtsam/linear/Makefile.am
index 568aecf62..a5825da1d 100644
--- a/gtsam/linear/Makefile.am
+++ b/gtsam/linear/Makefile.am
@@ -25,11 +25,12 @@ check_PROGRAMS += tests/testVectorValues
 sources += GaussianSequentialSolver.cpp GaussianMultifrontalSolver.cpp
 
 # Gaussian Factor Graphs
+sources += JacobianFactor.cpp HessianFactor.cpp
 sources += GaussianFactor.cpp GaussianFactorGraph.cpp
 sources += GaussianJunctionTree.cpp
 sources += GaussianConditional.cpp GaussianBayesNet.cpp
 sources += GaussianISAM.cpp
-check_PROGRAMS += tests/testGaussianFactor tests/testGaussianConditional
+check_PROGRAMS += tests/testHessianFactor tests/testGaussianFactor tests/testGaussianConditional
 check_PROGRAMS += tests/testGaussianJunctionTree
 
 # Iterative Methods
diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp
index 076ef6cd3..568566249 100644
--- a/gtsam/linear/NoiseModel.cpp
+++ b/gtsam/linear/NoiseModel.cpp
@@ -124,6 +124,8 @@ void Gaussian::WhitenInPlace(MatrixColMajor& H) const {
 // General QR, see also special version in Constrained
 SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional<std::vector<size_t>&> firstZeroRows) const {
 
+  static const bool debug = false;
+
   // get size(A) and maxRank
   // TODO: really no rank problems ?
   size_t m = Ab.size1(), n = Ab.size2()-1;
@@ -132,6 +134,8 @@ SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional<std::vector<size_t>&> firstZeroR
   // pre-whiten everything (cheaply if possible)
   WhitenInPlace(Ab);
 
+  if(debug) gtsam::print(Ab, "Whitened Ab: ");
+
   // Perform in-place Householder
 #ifdef GT_USE_LAPACK
   if(firstZeroRows)
@@ -222,6 +226,9 @@ SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional<std::vector<size_t>&> firstZeroR
 // General QR, see also special version in Constrained
 SharedDiagonal Gaussian::QRColumnWise(MatrixColMajor& Ab, std::vector<size_t>& firstZeroRows) const {
+
+  static const bool debug = false;
+
   // get size(A) and maxRank
   // TODO: really no rank problems ?
size_t m = Ab.size1(), n = Ab.size2()-1; @@ -230,6 +237,8 @@ SharedDiagonal Gaussian::QRColumnWise(MatrixColMajor& Ab, vector& firstZero // pre-whiten everything (cheaply if possible) WhitenInPlace(Ab); + if(debug) gtsam::print(Ab, "Whitened Ab: "); + // Perform in-place Householder #ifdef GT_USE_LAPACK #ifdef USE_LAPACK_QR diff --git a/gtsam/linear/SubgraphPreconditioner.cpp b/gtsam/linear/SubgraphPreconditioner.cpp index 84a138e36..ebcb74e82 100644 --- a/gtsam/linear/SubgraphPreconditioner.cpp +++ b/gtsam/linear/SubgraphPreconditioner.cpp @@ -17,6 +17,8 @@ #include #include +#include +#include using namespace std; @@ -25,7 +27,7 @@ namespace gtsam { /* ************************************************************************* */ SubgraphPreconditioner::SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar) : - Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(Ab2_->errors_(*xbar)) { + Ab1_(Ab1), Ab2_(Ab2), Rc1_(Rc1), xbar_(xbar), b2bar_(gaussianErrors_(*Ab2_,*xbar)) { } /* ************************************************************************* */ @@ -48,35 +50,35 @@ namespace gtsam { // } /* ************************************************************************* */ - double SubgraphPreconditioner::error(const VectorValues& y) const { + double error(const SubgraphPreconditioner& sp, const VectorValues& y) { Errors e(y); - VectorValues x = this->x(y); - Errors e2 = Ab2_->errors(x); + VectorValues x = sp.x(y); + Errors e2 = gaussianErrors(*sp.Ab2(),x); return 0.5 * (dot(e, e) + dot(e2,e2)); } /* ************************************************************************* */ // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar), - VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const { - VectorValues x = this->x(y); // x = inv(R1)*y - Errors e2 = Ab2_->errors(x); + VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y) { + VectorValues x = sp.x(y); // x = inv(R1)*y + Errors e2 = gaussianErrors(*sp.Ab2(),x); VectorValues gx2 = VectorValues::zero(y); - Ab2_->transposeMultiplyAdd(1.0,e2,gx2); // A2'*e2; - VectorValues gy2 = gtsam::backSubstituteTranspose(*Rc1_, gx2); // inv(R1')*gx2 + gtsam::transposeMultiplyAdd(*sp.Ab2(),1.0,e2,gx2); // A2'*e2; + VectorValues gy2 = gtsam::backSubstituteTranspose(*sp.Rc1(), gx2); // inv(R1')*gx2 return y + gy2; } /* ************************************************************************* */ // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y] - Errors SubgraphPreconditioner::operator*(const VectorValues& y) const { + Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y) { Errors e(y); // Add A2 contribution VectorValues x = y; // TODO avoid ? - gtsam::backSubstituteInPlace(*Rc1_, x); // x=inv(R1)*y - Errors e2 = *Ab2_ * x; // A2*x + gtsam::backSubstituteInPlace(*sp.Rc1(), x); // x=inv(R1)*y + Errors e2 = *sp.Ab2() * x; // A2*x e.splice(e.end(), e2); return e; @@ -84,7 +86,7 @@ namespace gtsam { /* ************************************************************************* */ // In-place version that overwrites e - void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const { + void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e) { Errors::iterator ei = e.begin(); @@ -94,27 +96,26 @@ namespace gtsam { // Add A2 contribution VectorValues x = y; // TODO avoid ? 
- gtsam::backSubstituteInPlace(*Rc1_, x); // x=inv(R1)*y - Ab2_->multiplyInPlace(x,ei); // use iterator version + gtsam::backSubstituteInPlace(*sp.Rc1(), x); // x=inv(R1)*y + gtsam::multiplyInPlace(*sp.Ab2(),x,ei); // use iterator version } /* ************************************************************************* */ // Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2 - VectorValues SubgraphPreconditioner::operator^(const Errors& e) const { - + VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e) { Errors::const_iterator it = e.begin(); - VectorValues y = zero(); + VectorValues y = sp.zero(); for ( Index i = 0 ; i < y.size() ; ++i, ++it ) y[i] = *it ; - transposeMultiplyAdd2(1.0,it,e.end(),y); + sp.transposeMultiplyAdd2(1.0,it,e.end(),y); return y; } /* ************************************************************************* */ // y += alpha*A'*e - void SubgraphPreconditioner::transposeMultiplyAdd - (double alpha, const Errors& e, VectorValues& y) const { + void transposeMultiplyAdd + (const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y) { Errors::const_iterator it = e.begin(); @@ -122,7 +123,7 @@ namespace gtsam { const Vector& ei = *it; axpy(alpha,ei,y[i]) ; } - transposeMultiplyAdd2(alpha,it,e.end(),y); + sp.transposeMultiplyAdd2(alpha,it,e.end(),y); } /* ************************************************************************* */ @@ -137,7 +138,7 @@ namespace gtsam { e2.push_back(*(it++)); VectorValues x = VectorValues::zero(y); // x = 0 - Ab2_->transposeMultiplyAdd(1.0,e2,x); // x += A2'*e2 + gtsam::transposeMultiplyAdd(*Ab2_,1.0,e2,x); // x += A2'*e2 axpy(alpha, gtsam::backSubstituteTranspose(*Rc1_, x), y); // y += alpha*inv(R1')*x } diff --git a/gtsam/linear/SubgraphPreconditioner.h b/gtsam/linear/SubgraphPreconditioner.h index d47805660..5d161f808 100644 --- a/gtsam/linear/SubgraphPreconditioner.h +++ b/gtsam/linear/SubgraphPreconditioner.h @@ -17,7 +17,7 @@ #pragma once -#include +#include #include #include @@ -34,7 +34,7 @@ namespace gtsam { public: typedef boost::shared_ptr sharedBayesNet; - typedef boost::shared_ptr sharedFG; + typedef boost::shared_ptr > sharedFG; typedef boost::shared_ptr sharedValues; typedef boost::shared_ptr sharedErrors; @@ -56,6 +56,14 @@ namespace gtsam { */ SubgraphPreconditioner(const sharedFG& Ab1, const sharedFG& Ab2, const sharedBayesNet& Rc1, const sharedValues& xbar); + /** Access Ab1 */ + const sharedFG& Ab1() const { return Ab1_; } + + /** Access Ab2 */ + const sharedFG& Ab2() const { return Ab2_; } + + /** Access Rc1 */ + const sharedBayesNet& Rc1() const { return Rc1_; } /** * Add zero-mean i.i.d. 
Gaussian prior terms to each variable @@ -73,27 +81,6 @@ namespace gtsam { return V ; } - /* error, given y */ - double error(const VectorValues& y) const; - - /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */ - VectorValues gradient(const VectorValues& y) const; - - /** Apply operator A */ - Errors operator*(const VectorValues& y) const; - - /** Apply operator A in place: needs e allocated already */ - void multiplyInPlace(const VectorValues& y, Errors& e) const; - - /** Apply operator A' */ - VectorValues operator^(const Errors& e) const; - - /** - * Add A'*e to y - * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2] - */ - void transposeMultiplyAdd(double alpha, const Errors& e, VectorValues& y) const; - /** * Add constraint part of the error only, used in both calls above * y += alpha*inv(R1')*A2'*e2 @@ -106,4 +93,25 @@ namespace gtsam { void print(const std::string& s = "SubgraphPreconditioner") const; }; + /* error, given y */ + double error(const SubgraphPreconditioner& sp, const VectorValues& y); + + /** gradient = y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar) */ + VectorValues gradient(const SubgraphPreconditioner& sp, const VectorValues& y); + + /** Apply operator A */ + Errors operator*(const SubgraphPreconditioner& sp, const VectorValues& y); + + /** Apply operator A in place: needs e allocated already */ + void multiplyInPlace(const SubgraphPreconditioner& sp, const VectorValues& y, Errors& e); + + /** Apply operator A' */ + VectorValues operator^(const SubgraphPreconditioner& sp, const Errors& e); + + /** + * Add A'*e to y + * y += alpha*A'*[e1;e2] = [alpha*e1; alpha*inv(R1')*A2'*e2] + */ + void transposeMultiplyAdd(const SubgraphPreconditioner& sp, double alpha, const Errors& e, VectorValues& y); + } // namespace gtsam diff --git a/gtsam/linear/SubgraphSolver-inl.h b/gtsam/linear/SubgraphSolver-inl.h index a3dbebbb8..babee1cec 100644 --- a/gtsam/linear/SubgraphSolver-inl.h +++ b/gtsam/linear/SubgraphSolver-inl.h @@ -20,11 +20,11 @@ #include #include #include +#include #include #include #include - using namespace std; namespace gtsam { @@ -62,12 +62,11 @@ bool split(const std::map &M, } - template void SubgraphSolver::replaceFactors(const typename LINEAR::shared_ptr &graph) { - shared_linear Ab1 = boost::make_shared(), - Ab2 = boost::make_shared(); + GaussianFactorGraph::shared_ptr Ab1 = boost::make_shared(); + GaussianFactorGraph::shared_ptr Ab2 = boost::make_shared(); if (parameters_->verbosity()) cout << "split the graph ..."; split(pairs_, *graph, *Ab1, *Ab2) ; @@ -84,7 +83,8 @@ void SubgraphSolver::replaceFactors(const typename LINEAR:: SubgraphPreconditioner::sharedBayesNet Rc1 = EliminationTree::Create(sacrificialAb1)->eliminate(); SubgraphPreconditioner::sharedValues xbar = gtsam::optimize_(*Rc1); - pc_ = boost::make_shared(Ab1,Ab2,Rc1,xbar); + pc_ = boost::make_shared( + Ab1->dynamicCastFactors >(), Ab2->dynamicCastFactors >(),Rc1,xbar); } template diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h index 157fc0806..d3e92e1e7 100644 --- a/gtsam/linear/iterative-inl.h +++ b/gtsam/linear/iterative-inl.h @@ -48,7 +48,7 @@ namespace gtsam { // Start with g0 = A'*(A*x0-b), d0 = - g0 // i.e., first step is in direction of negative gradient - g = Ab.gradient(x); + g = gradient(Ab,x); d = g; // instead of negating gradient, alpha will be negated // init gamma and calculate threshold @@ -88,9 +88,9 @@ namespace gtsam { double alpha = takeOptimalStep(x); // update gradient (or re-calculate at reset time) - if (k % parameters_.reset() == 0) g = 
Ab.gradient(x); + if (k % parameters_.reset() == 0) g = gradient(Ab,x); // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad) - else Ab.transposeMultiplyAdd(alpha, Ad, g); + else transposeMultiplyAdd(Ab, alpha, Ad, g); // check for convergence double new_gamma = dot(g, g); @@ -111,7 +111,7 @@ namespace gtsam { gamma = new_gamma; // In-place recalculation Ad <- A*d to avoid re-allocating Ad - Ab.multiplyInPlace(d, Ad); + multiplyInPlace(Ab, d, Ad); return false; } diff --git a/gtsam/linear/iterative.h b/gtsam/linear/iterative.h index 224c0ae44..223d1c67f 100644 --- a/gtsam/linear/iterative.h +++ b/gtsam/linear/iterative.h @@ -54,37 +54,43 @@ namespace gtsam { A_(A), b_(b) { } - /** gradient of objective function 0.5*|Ax-b_|^2 at x = A_'*(Ax-b_) */ - Vector gradient(const Vector& x) const { - return A_ ^ (A_ * x - b_); - } + /** Access A matrix */ + const Matrix& A() const { return A_; } - /** Apply operator A */ - inline Vector operator*(const Vector& x) const { - return A_ * x; - } - - /** Apply operator A in place */ - inline void multiplyInPlace(const Vector& x, Vector& e) const { - e = A_ * x; - } + /** Access b vector */ + const Vector& b() const { return b_; } /** Apply operator A'*e */ - inline Vector operator^(const Vector& e) const { + Vector operator^(const Vector& e) const { return A_ ^ e; } - /** x += alpha* A'*e */ - inline void transposeMultiplyAdd(double alpha, const Vector& e, Vector& x) const { - gtsam::transposeMultiplyAdd(alpha,A_,e,x); - } - /** * Print with optional string */ void print (const std::string& s = "System") const; }; + /** gradient of objective function 0.5*|Ax-b_|^2 at x = A_'*(Ax-b_) */ + inline Vector gradient(const System& system, const Vector& x) { + return system.A() ^ (system.A() * x - system.b()); + } + + /** Apply operator A */ + inline Vector operator*(const System& system, const Vector& x) { + return system.A() * x; + } + + /** Apply operator A in place */ + inline void multiplyInPlace(const System& system, const Vector& x, Vector& e) { + e = system.A() * x; + } + + /** x += alpha* A'*e */ + inline void transposeMultiplyAdd(const System& system, double alpha, const Vector& e, Vector& x) { + transposeMultiplyAdd(alpha,system.A(),e,x); + } + /** * Method of steepest gradients, System version */ diff --git a/gtsam/linear/tests/testGaussianFactor.cpp b/gtsam/linear/tests/testGaussianFactor.cpp index fc2cbf4b8..f6b458448 100644 --- a/gtsam/linear/tests/testGaussianFactor.cpp +++ b/gtsam/linear/tests/testGaussianFactor.cpp @@ -35,6 +35,9 @@ using namespace boost::assign; #include #include #include +#include +#include +#include using namespace std; using namespace gtsam; @@ -48,30 +51,30 @@ static SharedDiagonal constraintModel = noiseModel::Constrained::All(2); /* ************************************************************************* */ -TEST( GaussianFactor, constructor) +TEST(GaussianFactor, constructor) { Vector b = Vector_(3, 1., 2., 3.); SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector_(3,1.,1.,1.)); std::list > terms; terms.push_back(make_pair(_x0_, eye(3))); terms.push_back(make_pair(_x1_, 2.*eye(3))); - GaussianFactor actual(terms, b, noise); - GaussianFactor expected(_x0_, eye(3), _x1_, 2.*eye(3), b, noise); + JacobianFactor actual(terms, b, noise); + JacobianFactor expected(_x0_, eye(3), _x1_, 2.*eye(3), b, noise); EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ -TEST( GaussianFactor, constructor2) +TEST(GaussianFactor, constructor2) { Vector b = Vector_(3, 1., 
2., 3.); SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector_(3,1.,1.,1.)); std::list > terms; terms.push_back(make_pair(_x0_, eye(3))); terms.push_back(make_pair(_x1_, 2.*eye(3))); - const GaussianFactor actual(terms, b, noise); + const JacobianFactor actual(terms, b, noise); - GaussianFactor::const_iterator key0 = actual.begin(); - GaussianFactor::const_iterator key1 = key0 + 1; + JacobianFactor::const_iterator key0 = actual.begin(); + JacobianFactor::const_iterator key1 = key0 + 1; EXPECT(assert_equal(*key0, _x0_)); EXPECT(assert_equal(*key1, _x1_)); @@ -125,7 +128,7 @@ TEST( GaussianFactor, constructor2) // variablePositions[1].resize(2); variablePositions[1][0]=0; variablePositions[1][1]=1; // variablePositions[2].resize(1); variablePositions[2][0]=0; // -// GaussianFactor actual = *GaussianFactor::Combine(gfg, varindex, factors, variables, variablePositions); +// JacobianFactor actual = *JacobianFactor::Combine(gfg, varindex, factors, variables, variablePositions); // // Matrix zero3x3 = zeros(3,3); // Matrix A0 = gtsam::stack(3, &A00, &A10, &A20); @@ -133,7 +136,7 @@ TEST( GaussianFactor, constructor2) // Vector b = gtsam::concatVectors(3, &b0, &b1, &b2); // Vector sigmas = gtsam::concatVectors(3, &s0, &s1, &s2); // -// GaussianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); +// JacobianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); // // EXPECT(assert_equal(expected, actual)); //} @@ -171,7 +174,7 @@ TEST(GaussianFactor, Combine2) gfg.add(0, A10, 1, A11, b1, noiseModel::Diagonal::Sigmas(s1, true)); gfg.add(1, A21, b2, noiseModel::Diagonal::Sigmas(s2, true)); - GaussianFactor actual = *GaussianFactor::Combine(gfg, VariableSlots(gfg)); + JacobianFactor actual = *JacobianFactor::Combine(gfg, VariableSlots(gfg)); Matrix zero3x3 = zeros(3,3); Matrix A0 = gtsam::stack(3, &A10, &zero3x3, &zero3x3); @@ -179,13 +182,13 @@ TEST(GaussianFactor, Combine2) Vector b = gtsam::concatVectors(3, &b1, &b0, &b2); Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2); - GaussianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); + JacobianFactor expected(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); EXPECT(assert_equal(expected, actual)); } /* ************************************************************************* */ -TEST(GaussianFactor, CombineAndEliminate) +TEST_UNSAFE(GaussianFactor, CombineAndEliminate) { Matrix A01 = Matrix_(3,3, 1.0, 0.0, 0.0, @@ -223,22 +226,22 @@ TEST(GaussianFactor, CombineAndEliminate) Vector b = gtsam::concatVectors(3, &b1, &b0, &b2); Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2); - GaussianFactor expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); - GaussianBayesNet expectedBN(*expectedFactor.eliminate(1, GaussianFactor::SOLVE_QR)); + JacobianFactor expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true)); + GaussianBayesNet expectedBN(*expectedFactor.eliminate(1)); - pair actual( - GaussianFactor::CombineAndEliminate(gfg, 1, GaussianFactor::SOLVE_CHOLESKY)); + pair actualQR(JacobianFactor::CombineAndEliminate( + *gfg.dynamicCastFactors >(), 1)); - EXPECT(assert_equal(expectedBN, *actual.first)); - EXPECT(assert_equal(expectedFactor, *actual.second)); + EXPECT(assert_equal(expectedBN, *actualQR.first)); + EXPECT(assert_equal(expectedFactor, *actualQR.second)); } ///* ************************************************************************* */ -//TEST( GaussianFactor, operators ) +//TEST(GaussianFactor, 
operators ) //{ // Matrix I = eye(2); // Vector b = Vector_(2,0.2,-0.1); -// GaussianFactor lf(_x1_, -I, _x2_, I, b, sigma0_1); +// JacobianFactor lf(_x1_, -I, _x2_, I, b, sigma0_1); // // VectorValues c; // c.insert(_x1_,Vector_(2,10.,20.)); @@ -271,33 +274,33 @@ TEST(GaussianFactor, CombineAndEliminate) // A11(1,0) = 0; A11(1,1) = 1; // Vector b(2); // b(0) = 2; b(1) = -1; -// GaussianFactor::shared_ptr f1(new GaussianFactor(_x1_, A11, b*sigma1, sharedSigma(2,sigma1))); +// JacobianFactor::shared_ptr f1(new JacobianFactor(_x1_, A11, b*sigma1, sharedSigma(2,sigma1))); // // double sigma2 = 0.5; // A11(0,0) = 1; A11(0,1) = 0; // A11(1,0) = 0; A11(1,1) = -1; // b(0) = 4 ; b(1) = -5; -// GaussianFactor::shared_ptr f2(new GaussianFactor(_x1_, A11, b*sigma2, sharedSigma(2,sigma2))); +// JacobianFactor::shared_ptr f2(new JacobianFactor(_x1_, A11, b*sigma2, sharedSigma(2,sigma2))); // // double sigma3 = 0.25; // A11(0,0) = 1; A11(0,1) = 0; // A11(1,0) = 0; A11(1,1) = -1; // b(0) = 3 ; b(1) = -88; -// GaussianFactor::shared_ptr f3(new GaussianFactor(_x1_, A11, b*sigma3, sharedSigma(2,sigma3))); +// JacobianFactor::shared_ptr f3(new JacobianFactor(_x1_, A11, b*sigma3, sharedSigma(2,sigma3))); // // // TODO: find a real sigma value for this example // double sigma4 = 0.1; // A11(0,0) = 6; A11(0,1) = 0; // A11(1,0) = 0; A11(1,1) = 7; // b(0) = 5 ; b(1) = -6; -// GaussianFactor::shared_ptr f4(new GaussianFactor(_x1_, A11*sigma4, b*sigma4, sharedSigma(2,sigma4))); +// JacobianFactor::shared_ptr f4(new JacobianFactor(_x1_, A11*sigma4, b*sigma4, sharedSigma(2,sigma4))); // -// vector lfg; +// vector lfg; // lfg.push_back(f1); // lfg.push_back(f2); // lfg.push_back(f3); // lfg.push_back(f4); -// GaussianFactor combined(lfg); +// JacobianFactor combined(lfg); // // Vector sigmas = Vector_(8, sigma1, sigma1, sigma2, sigma2, sigma3, sigma3, sigma4, sigma4); // Matrix A22(8,2); @@ -315,25 +318,25 @@ TEST(GaussianFactor, CombineAndEliminate) // // vector > meas; // meas.push_back(make_pair(_x1_, A22)); -// GaussianFactor expected(meas, exb, sigmas); +// JacobianFactor expected(meas, exb, sigmas); // EXPECT(assert_equal(expected,combined)); //} // ///* ************************************************************************* */ -//TEST( GaussianFactor, linearFactorN){ +//TEST(GaussianFactor, linearFactorN){ // Matrix I = eye(2); -// vector f; +// vector f; // SharedDiagonal model = sharedSigma(2,1.0); -// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x1_, I, Vector_(2, +// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x1_, I, Vector_(2, // 10.0, 5.0), model))); -// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x1_, -10 * I, +// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x1_, -10 * I, // _x2_, 10 * I, Vector_(2, 1.0, -2.0), model))); -// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x2_, -10 * I, +// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x2_, -10 * I, // _x3_, 10 * I, Vector_(2, 1.5, -1.5), model))); -// f.push_back(GaussianFactor::shared_ptr(new GaussianFactor(_x3_, -10 * I, +// f.push_back(JacobianFactor::shared_ptr(new JacobianFactor(_x3_, -10 * I, // _x4_, 10 * I, Vector_(2, 2.0, -1.0), model))); // -// GaussianFactor combinedFactor(f); +// JacobianFactor combinedFactor(f); // // vector > combinedMeasurement; // combinedMeasurement.push_back(make_pair(_x1_, Matrix_(8,2, @@ -376,12 +379,12 @@ TEST(GaussianFactor, CombineAndEliminate) // 10.0, 5.0, 1.0, -2.0, 1.5, -1.5, 2.0, -1.0); // // Vector sigmas = repeat(8,1.0); -// 
GaussianFactor expected(combinedMeasurement, b, sigmas); +// JacobianFactor expected(combinedMeasurement, b, sigmas); // EXPECT(assert_equal(expected,combinedFactor)); //} /* ************************************************************************* */ -TEST( GaussianFactor, eliminate2 ) +TEST(GaussianFactor, eliminate2 ) { // sigmas double sigma1 = 0.2; @@ -415,14 +418,12 @@ TEST( GaussianFactor, eliminate2 ) vector > meas; meas.push_back(make_pair(_x2_, Ax2)); meas.push_back(make_pair(_l11_, Al1x1)); - GaussianFactor combined(meas, b2, sigmas); + JacobianFactor combined(meas, b2, sigmas); // eliminate the combined factor - GaussianConditional::shared_ptr actualCG_QR, actualCG_Chol; - GaussianFactor::shared_ptr actualLF_QR(new GaussianFactor(combined)); - GaussianFactor::shared_ptr actualLF_Chol(new GaussianFactor(combined)); - actualCG_QR = actualLF_QR->eliminateFirst(GaussianFactor::SOLVE_QR); - actualCG_Chol = actualLF_Chol->eliminateFirst(GaussianFactor::SOLVE_CHOLESKY); + GaussianConditional::shared_ptr actualCG_QR; + JacobianFactor::shared_ptr actualLF_QR(new JacobianFactor(combined)); + actualCG_QR = actualLF_QR->eliminateFirst(); // create expected Conditional Gaussian double oldSigma = 0.0894427; // from when R was made unit @@ -437,7 +438,6 @@ TEST( GaussianFactor, eliminate2 ) Vector d = Vector_(2,0.2,-0.14)/oldSigma; GaussianConditional expectedCG(_x2_,d,R11,_l11_,S12,ones(2)); EXPECT(assert_equal(expectedCG,*actualCG_QR,1e-4)); - EXPECT(assert_equal(expectedCG,*actualCG_Chol,1e-4)); // the expected linear factor double sigma = 0.2236; @@ -447,9 +447,8 @@ TEST( GaussianFactor, eliminate2 ) 0.00, 1.00, +0.00, -1.00 )/sigma; Vector b1 = Vector_(2,0.0,0.894427); - GaussianFactor expectedLF(_l11_, Bl1x1, b1, repeat(2,1.0)); + JacobianFactor expectedLF(_l11_, Bl1x1, b1, repeat(2,1.0)); EXPECT(assert_equal(expectedLF,*actualLF_QR,1e-3)); - EXPECT(assert_equal(expectedLF,*actualLF_Chol,1e-3)); } /* ************************************************************************* */ @@ -481,7 +480,7 @@ TEST(GaussianFactor, eliminateFrontals) make_pair(9, ublas::project(Ab, ublas::range(0,4), ublas::range(6,8))), make_pair(11, ublas::project(Ab, ublas::range(0,4), ublas::range(8,10))); Vector b1 = ublas::project(ublas::column(Ab, 10), ublas::range(0,4)); - GaussianFactor::shared_ptr factor1(new GaussianFactor(terms1, b1, sharedSigma(4, 0.5))); + JacobianFactor::shared_ptr factor1(new JacobianFactor(terms1, b1, sharedSigma(4, 0.5))); // Create second factor list > terms2; @@ -491,7 +490,7 @@ TEST(GaussianFactor, eliminateFrontals) make_pair(9, ublas::project(Ab, ublas::range(4,8), ublas::range(6,8))), make_pair(11, ublas::project(Ab, ublas::range(4,8), ublas::range(8,10))); Vector b2 = ublas::project(ublas::column(Ab, 10), ublas::range(4,8)); - GaussianFactor::shared_ptr factor2(new GaussianFactor(terms2, b2, sharedSigma(4, 0.5))); + JacobianFactor::shared_ptr factor2(new JacobianFactor(terms2, b2, sharedSigma(4, 0.5))); // Create third factor list > terms3; @@ -500,14 +499,14 @@ TEST(GaussianFactor, eliminateFrontals) make_pair(9, ublas::project(Ab, ublas::range(8,12), ublas::range(6,8))), make_pair(11, ublas::project(Ab, ublas::range(8,12), ublas::range(8,10))); Vector b3 = ublas::project(ublas::column(Ab, 10), ublas::range(8,12)); - GaussianFactor::shared_ptr factor3(new GaussianFactor(terms3, b3, sharedSigma(4, 0.5))); + JacobianFactor::shared_ptr factor3(new JacobianFactor(terms3, b3, sharedSigma(4, 0.5))); // Create fourth factor list > terms4; terms4 += make_pair(11, ublas::project(Ab, 
ublas::range(12,14), ublas::range(8,10))); Vector b4 = ublas::project(ublas::column(Ab, 10), ublas::range(12,14)); - GaussianFactor::shared_ptr factor4(new GaussianFactor(terms4, b4, sharedSigma(2, 0.5))); + JacobianFactor::shared_ptr factor4(new JacobianFactor(terms4, b4, sharedSigma(2, 0.5))); // Create factor graph GaussianFactorGraph factors; @@ -517,11 +516,11 @@ TEST(GaussianFactor, eliminateFrontals) factors.push_back(factor4); // Create combined factor - GaussianFactor combined(*GaussianFactor::Combine(factors, VariableSlots(factors))); + JacobianFactor combined(*JacobianFactor::Combine(factors, VariableSlots(factors))); // Copies factors as they will be eliminated in place - GaussianFactor actualFactor_QR = combined; - GaussianFactor actualFactor_Chol = combined; + JacobianFactor actualFactor_QR = combined; + JacobianFactor actualFactor_Chol = combined; // Expected augmented matrix, both GaussianConditional (first 6 rows) and remaining factor (next 4 rows) Matrix R = 2.0*Matrix_(11,11, @@ -579,7 +578,7 @@ TEST(GaussianFactor, eliminateFrontals) Vector be = ublas::project(ublas::column(R, 10), ublas::range(6,10)); // Eliminate (3 frontal variables, 6 scalar columns) using QR !!!! - GaussianBayesNet actualFragment_QR = *actualFactor_QR.eliminate(3, GaussianFactor::SOLVE_QR); + GaussianBayesNet actualFragment_QR = *actualFactor_QR.eliminate(3); EXPECT(assert_equal(expectedFragment, actualFragment_QR, 0.001)); EXPECT(assert_equal(size_t(2), actualFactor_QR.keys().size())); EXPECT(assert_equal(Index(9), actualFactor_QR.keys()[0])); @@ -587,10 +586,10 @@ TEST(GaussianFactor, eliminateFrontals) EXPECT(assert_equal(Ae1, actualFactor_QR.getA(actualFactor_QR.begin()), 0.001)); EXPECT(assert_equal(Ae2, actualFactor_QR.getA(actualFactor_QR.begin()+1), 0.001)); EXPECT(assert_equal(be, actualFactor_QR.getb(), 0.001)); - EXPECT(assert_equal(ones(4), actualFactor_QR.get_sigmas(), 0.001)); + EXPECT(assert_equal(ones(4), actualFactor_QR.get_model()->sigmas(), 0.001)); // Eliminate (3 frontal variables, 6 scalar columns) using Cholesky !!!! 
-// GaussianBayesNet actualFragment_Chol = *actualFactor_Chol.eliminate(3, GaussianFactor::SOLVE_CHOLESKY); +// GaussianBayesNet actualFragment_Chol = *actualFactor_Chol.eliminate(3, JacobianFactor::SOLVE_CHOLESKY); // EXPECT(assert_equal(expectedFragment, actualFragment_Chol, 0.001)); // EXPECT(assert_equal(size_t(2), actualFactor_Chol.keys().size())); // EXPECT(assert_equal(Index(9), actualFactor_Chol.keys()[0])); @@ -602,9 +601,9 @@ TEST(GaussianFactor, eliminateFrontals) } /* ************************************************************************* */ -TEST( GaussianFactor, default_error ) +TEST(GaussianFactor, default_error ) { - GaussianFactor f; + JacobianFactor f; vector dims; VectorValues c(dims); double actual = f.error(c); @@ -612,21 +611,21 @@ TEST( GaussianFactor, default_error ) } ////* ************************************************************************* */ -//TEST( GaussianFactor, eliminate_empty ) +//TEST(GaussianFactor, eliminate_empty ) //{ // // create an empty factor -// GaussianFactor f; +// JacobianFactor f; // // // eliminate the empty factor // GaussianConditional::shared_ptr actualCG; -// GaussianFactor::shared_ptr actualLF(new GaussianFactor(f)); +// JacobianFactor::shared_ptr actualLF(new JacobianFactor(f)); // actualCG = actualLF->eliminateFirst(); // // // expected Conditional Gaussian is just a parent-less node with P(x)=1 // GaussianConditional expectedCG(_x2_); // // // expected remaining factor is still empty :-) -// GaussianFactor expectedLF; +// JacobianFactor expectedLF; // // // check if the result matches // EXPECT(actualCG->equals(expectedCG)); @@ -634,10 +633,10 @@ TEST( GaussianFactor, default_error ) //} //* ************************************************************************* */ -TEST( GaussianFactor, empty ) +TEST(GaussianFactor, empty ) { // create an empty factor - GaussianFactor f; + JacobianFactor f; EXPECT(f.empty()==true); } @@ -650,9 +649,9 @@ void print(const list& i) { } ///* ************************************************************************* */ -//TEST( GaussianFactor, tally_separator ) +//TEST(GaussianFactor, tally_separator ) //{ -// GaussianFactor f(_x1_, eye(2), _x2_, eye(2), _l1_, eye(2), ones(2), sigma0_1); +// JacobianFactor f(_x1_, eye(2), _x2_, eye(2), _l1_, eye(2), ones(2), sigma0_1); // // std::set act1, act2, act3; // f.tally_separator(_x1_, act1); @@ -673,7 +672,7 @@ void print(const list& i) { //} /* ************************************************************************* */ -TEST( GaussianFactor, CONSTRUCTOR_GaussianConditional ) +TEST(GaussianFactor, CONSTRUCTOR_GaussianConditional ) { Matrix R11 = eye(2); Matrix S12 = Matrix_(2,2, @@ -685,39 +684,39 @@ TEST( GaussianFactor, CONSTRUCTOR_GaussianConditional ) GaussianConditional::shared_ptr CG(new GaussianConditional(_x2_,d,R11,_l11_,S12,sigmas)); // Call the constructor we are testing ! 
-  GaussianFactor actualLF(*CG);
+  JacobianFactor actualLF(*CG);
 
-  GaussianFactor expectedLF(_x2_,R11,_l11_,S12,d, sigmas);
+  JacobianFactor expectedLF(_x2_,R11,_l11_,S12,d, sigmas);
 
   EXPECT(assert_equal(expectedLF,actualLF,1e-5));
 }
 
 ///* ************************************************************************* */
-//TEST( GaussianFactor, CONSTRUCTOR_GaussianConditionalConstrained )
+//TEST(GaussianFactor, CONSTRUCTOR_GaussianConditionalConstrained )
 //{
 //  Matrix Ax = eye(2);
 //  Vector b = Vector_(2, 3.0, 5.0);
 //  SharedDiagonal noisemodel = noiseModel::Constrained::All(2);
-//  GaussianFactor::shared_ptr expected(new GaussianFactor(_x0_, Ax, b, noisemodel));
+//  JacobianFactor::shared_ptr expected(new JacobianFactor(_x0_, Ax, b, noisemodel));
 //  GaussianFactorGraph graph;
 //  graph.push_back(expected);
 //
 //  GaussianConditional::shared_ptr conditional = GaussianSequentialSolver::EliminateUntil(graph,_x0_+1);
-//  GaussianFactor actual(*conditional);
+//  JacobianFactor actual(*conditional);
 //
 //  EXPECT(assert_equal(*expected, actual));
 //}
 
 /* ************************************************************************* */
-TEST ( GaussianFactor, constraint_eliminate1 )
+TEST ( JacobianFactor, constraint_eliminate1 )
 {
   // construct a linear constraint
   Vector v(2); v(0)=1.2; v(1)=3.4;
   Index key = _x0_;
-  GaussianFactor lc(key, eye(2), v, constraintModel);
+  JacobianFactor lc(key, eye(2), v, constraintModel);
 
   // eliminate it
   GaussianConditional::shared_ptr actualCG;
-  GaussianFactor::shared_ptr actualLF(new GaussianFactor(lc));
+  JacobianFactor::shared_ptr actualLF(new JacobianFactor(lc));
   actualCG = actualLF->eliminateFirst();
 
   // verify linear factor
@@ -730,7 +729,7 @@ TEST ( GaussianFactor, constraint_eliminate1 )
 }
 
 /* ************************************************************************* */
-TEST ( GaussianFactor, constraint_eliminate2 )
+TEST ( JacobianFactor, constraint_eliminate2 )
 {
   // Construct a linear constraint
   // RHS
@@ -746,15 +745,15 @@
   A2(0,0) = 1.0 ; A2(0,1) = 2.0;
   A2(1,0) = 2.0 ; A2(1,1) = 4.0;
-  GaussianFactor lc(_x_, A1, _y_, A2, b, constraintModel);
+  JacobianFactor lc(_x_, A1, _y_, A2, b, constraintModel);
 
   // eliminate x and verify results
   GaussianConditional::shared_ptr actualCG;
-  GaussianFactor::shared_ptr actualLF(new GaussianFactor(lc));
+  JacobianFactor::shared_ptr actualLF(new JacobianFactor(lc));
   actualCG = actualLF->eliminateFirst();
 
   // LF should be null
-  GaussianFactor expectedLF;
+  JacobianFactor expectedLF;
   EXPECT(assert_equal(*actualLF, expectedLF));
 
   // verify CG
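// --- Illustrative sketch (not part of the patch): eliminating a constrained
// JacobianFactor as in constraint_eliminate1 above; `constraintModel` is the
// test fixture's constrained noise model.
//
//   Vector v(2); v(0) = 1.2; v(1) = 3.4;
//   JacobianFactor lc(_x0_, eye(2), v, constraintModel);
//   JacobianFactor::shared_ptr remainder(new JacobianFactor(lc));
//   GaussianConditional::shared_ptr cg = remainder->eliminateFirst();
//   // cg is the conditional on _x0_; for this unary factor, `remainder` is empty.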
@@ -791,14 +790,14 @@ TEST(GaussianFactor, permuteWithInverse)
   inversePermutation[4] = 1;
   inversePermutation[5] = 0;
 
-  GaussianFactor actual(1, A1, 3, A2, 5, A3, b, sharedSigma(2, 1.0));
-  GaussianFactorGraph actualFG; actualFG.push_back(GaussianFactor::shared_ptr(new GaussianFactor(actual)));
+  JacobianFactor actual(1, A1, 3, A2, 5, A3, b, sharedSigma(2, 1.0));
+  GaussianFactorGraph actualFG; actualFG.push_back(JacobianFactor::shared_ptr(new JacobianFactor(actual)));
   VariableIndex actualIndex(actualFG);
   actual.permuteWithInverse(inversePermutation);
 //  actualIndex.permute(*inversePermutation.inverse());
-  GaussianFactor expected(0, A3, 2, A2, 4, A1, b, sharedSigma(2, 1.0));
-  GaussianFactorGraph expectedFG; expectedFG.push_back(GaussianFactor::shared_ptr(new GaussianFactor(expected)));
+  JacobianFactor expected(0, A3, 2, A2, 4, A1, b, sharedSigma(2, 1.0));
+  GaussianFactorGraph expectedFG; expectedFG.push_back(JacobianFactor::shared_ptr(new JacobianFactor(expected)));
 //  GaussianVariableIndex expectedIndex(expectedFG);
   EXPECT(assert_equal(expected, actual));
@@ -816,7 +815,7 @@ TEST(GaussianFactor, permuteWithInverse)
 }
 
 ///* ************************************************************************* */
-//TEST( GaussianFactor, erase)
+//TEST(GaussianFactor, erase)
 //{
 //  Vector b = Vector_(3, 1., 2., 3.);
 //  SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector_(3,1.,1.,1.));
@@ -824,16 +823,16 @@ TEST(GaussianFactor, permuteWithInverse)
 //  terms.push_back(make_pair(_x0_, eye(2)));
 //  terms.push_back(make_pair(_x1_, 2.*eye(2)));
 //
-//  GaussianFactor actual(terms, b, noise);
+//  JacobianFactor actual(terms, b, noise);
 //  int erased = actual.erase_A(_x0_);
 //
 //  LONGS_EQUAL(1, erased);
-//  GaussianFactor expected(_x1_, 2.*eye(2), b, noise);
+//  JacobianFactor expected(_x1_, 2.*eye(2), b, noise);
 //  EXPECT(assert_equal(expected, actual));
 //}
 
 ///* ************************************************************************* */
-//TEST( GaussianFactor, eliminateMatrix)
+//TEST(GaussianFactor, eliminateMatrix)
 //{
 //  Matrix Ab = Matrix_(3, 4,
 //      1., 2., 0., 3.,
@@ -847,10 +846,10 @@ TEST(GaussianFactor, permuteWithInverse)
 //  dimensions.insert(make_pair(_x2_, 1));
 //  dimensions.insert(make_pair(_x3_, 1));
 //
-//  GaussianFactor::shared_ptr factor;
+//  JacobianFactor::shared_ptr factor;
 //  GaussianBayesNet bn;
 //  boost::tie(bn, factor) =
-//      GaussianFactor::eliminateMatrix(Ab, NULL, model, frontals, separator, dimensions);
+//      JacobianFactor::eliminateMatrix(Ab, NULL, model, frontals, separator, dimensions);
 //
 //  GaussianBayesNet bn_expected;
 //  GaussianBayesNet::sharedConditional conditional1(new GaussianConditional(_x1_, Vector_(1, 6.), Matrix_(1, 1, 2.),
@@ -861,7 +860,7 @@ TEST(GaussianFactor, permuteWithInverse)
 //  bn_expected.push_back(conditional2);
 //  EXPECT(assert_equal(bn_expected, bn));
 //
-//  GaussianFactor factor_expected(_x3_, Matrix_(1, 1, 14.), Vector_(1, 16.), SharedDiagonal(Vector_(1, 1.)));
+//  JacobianFactor factor_expected(_x3_, Matrix_(1, 1, 14.), Vector_(1, 16.), SharedDiagonal(Vector_(1, 1.)));
 //  EXPECT(assert_equal(factor_expected, *factor));
 //}
diff --git a/gtsam/linear/tests/testGaussianJunctionTree.cpp b/gtsam/linear/tests/testGaussianJunctionTree.cpp
index 4f983f900..4e75537eb 100644
--- a/gtsam/linear/tests/testGaussianJunctionTree.cpp
+++ b/gtsam/linear/tests/testGaussianJunctionTree.cpp
@@ -36,10 +36,10 @@ GaussianFactorGraph createChain() {
   typedef GaussianFactorGraph::sharedFactor Factor;
 
   SharedDiagonal model(Vector_(1, 0.5));
-  Factor factor1(new GaussianFactor(x2, Matrix_(1,1,1.), x1, Matrix_(1,1,1.), Vector_(1,1.), model));
-  Factor factor2(new GaussianFactor(x2, Matrix_(1,1,1.), x3, Matrix_(1,1,1.), Vector_(1,1.), model));
-  Factor factor3(new GaussianFactor(x3, Matrix_(1,1,1.), x4, Matrix_(1,1,1.), Vector_(1,1.), model));
-  Factor factor4(new GaussianFactor(x4, Matrix_(1,1,1.), Vector_(1,1.), model));
+  Factor factor1(new JacobianFactor(x2, Matrix_(1,1,1.), x1, Matrix_(1,1,1.), Vector_(1,1.), model));
+  Factor factor2(new JacobianFactor(x2, Matrix_(1,1,1.), x3, Matrix_(1,1,1.), Vector_(1,1.), model));
+  Factor factor3(new JacobianFactor(x3, Matrix_(1,1,1.), x4, Matrix_(1,1,1.), Vector_(1,1.), model));
+  Factor factor4(new JacobianFactor(x4, Matrix_(1,1,1.), Vector_(1,1.), model));
 
   GaussianFactorGraph fg;
   fg.push_back(factor1);
diff --git a/gtsam/linear/tests/testHessianFactor.cpp b/gtsam/linear/tests/testHessianFactor.cpp
new file mode 100644
index 000000000..33de4bd0e
--- /dev/null
+++ b/gtsam/linear/tests/testHessianFactor.cpp
@@ -0,0 +1,210 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file    testCholeskyFactor.cpp
+ * @brief
+ * @author  Richard Roberts
+ * @created Dec 15, 2010
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+using namespace gtsam;
+using namespace std;
+
+/* ************************************************************************* */
+TEST(HessianFactor, ConversionConstructor) {
+
+  HessianFactor expected;
+  expected.keys_.push_back(0);
+  expected.keys_.push_back(1);
+  size_t dims[] = { 2, 4, 1 };
+  expected.info_.resize(dims, dims+3, false);
+  expected.matrix_ = Matrix_(7,7,
+      125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000,
+      0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000,
+      -25.0000, 0.0, 25.0000, 0.0, 0.0, 0.0, -5.0000,
+      0.0, -25.0000, 0.0, 25.0000, 0.0, 0.0, 7.5000,
+      -100.0000, 0.0, 0.0, 0.0, 100.0000, 0.0, -20.0000,
+      0.0, -100.0000, 0.0, 0.0, 0.0, 100.0000, 10.0000,
+      25.0000, -17.5000, -5.0000, 7.5000, -20.0000, 10.0000, 8.2500);
+
+  // sigmas
+  double sigma1 = 0.2;
+  double sigma2 = 0.1;
+  Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
+
+  // the combined linear factor
+  Matrix Ax2 = Matrix_(4,2,
+      // x2
+      -1., 0.,
+      +0.,-1.,
+      1., 0.,
+      +0.,1.
+  );
+
+  Matrix Al1x1 = Matrix_(4,4,
+      // l1   x1
+      1., 0., 0.00, 0., // f4
+      0., 1., 0.00, 0., // f4
+      0., 0., -1.,  0., // f2
+      0., 0., 0.00,-1.  // f2
+  );
+
+  // the RHS
+  Vector b2(4);
+  b2(0) = -0.2;
+  b2(1) =  0.3;
+  b2(2) =  0.2;
+  b2(3) = -0.1;
+
+  vector<pair<Index, Matrix> > meas;
+  meas.push_back(make_pair(0, Ax2));
+  meas.push_back(make_pair(1, Al1x1));
+  JacobianFactor combined(meas, b2, sigmas);
+
+  HessianFactor actual(combined);
+
+  EXPECT(assert_equal(expected, actual, 1e-9));
+
+}
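// --- Illustrative sketch (not part of the patch): the conversion exercised by
// ConversionConstructor above, turning a Jacobian (measurement) factor into
// its Hessian (information) form; `meas`, `b2` and `sigmas` are as in the test.
//
//   JacobianFactor combined(meas, b2, sigmas);  // ||Ax - b||^2, diagonal sigmas
//   HessianFactor converted(combined);          // augmented information form
//   // converted now holds the blocks compared against `expected` above.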
+
+/* ************************************************************************* */
+TEST_UNSAFE(GaussianFactor, CombineAndEliminate)
+{
+  Matrix A01 = Matrix_(3,3,
+      1.0, 0.0, 0.0,
+      0.0, 1.0, 0.0,
+      0.0, 0.0, 1.0);
+  Vector b0 = Vector_(3, 1.5, 1.5, 1.5);
+  Vector s0 = Vector_(3, 1.6, 1.6, 1.6);
+
+  Matrix A10 = Matrix_(3,3,
+      2.0, 0.0, 0.0,
+      0.0, 2.0, 0.0,
+      0.0, 0.0, 2.0);
+  Matrix A11 = Matrix_(3,3,
+      -2.0, 0.0, 0.0,
+      0.0, -2.0, 0.0,
+      0.0, 0.0, -2.0);
+  Vector b1 = Vector_(3, 2.5, 2.5, 2.5);
+  Vector s1 = Vector_(3, 2.6, 2.6, 2.6);
+
+  Matrix A21 = Matrix_(3,3,
+      3.0, 0.0, 0.0,
+      0.0, 3.0, 0.0,
+      0.0, 0.0, 3.0);
+  Vector b2 = Vector_(3, 3.5, 3.5, 3.5);
+  Vector s2 = Vector_(3, 3.6, 3.6, 3.6);
+
+  GaussianFactorGraph gfg;
+  gfg.add(1, A01, b0, noiseModel::Diagonal::Sigmas(s0, true));
+  gfg.add(0, A10, 1, A11, b1, noiseModel::Diagonal::Sigmas(s1, true));
+  gfg.add(1, A21, b2, noiseModel::Diagonal::Sigmas(s2, true));
+
+  Matrix zero3x3 = zeros(3,3);
+  Matrix A0 = gtsam::stack(3, &A10, &zero3x3, &zero3x3);
+  Matrix A1 = gtsam::stack(3, &A11, &A01, &A21);
+  Vector b = gtsam::concatVectors(3, &b1, &b0, &b2);
+  Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2);
+
+  JacobianFactor expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
+  GaussianBayesNet expectedBN(*expectedFactor.eliminate(1));
+
+  pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> actualCholesky(HessianFactor::CombineAndEliminate(
+      *gfg.convertCastFactors<FactorGraph<HessianFactor> >(), 1));
+
+  EXPECT(assert_equal(expectedBN, *actualCholesky.first, 1e-6));
+  EXPECT(assert_equal(HessianFactor(expectedFactor), *actualCholesky.second, 1e-6));
+}
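// --- Illustrative sketch (not part of the patch): CombineAndEliminate as used
// above; `hessianGraph` stands for a FactorGraph<HessianFactor>, and the pair
// type follows the declaration in this test.
//
//   pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> result =
//       HessianFactor::CombineAndEliminate(hessianGraph, 1);
//   GaussianBayesNet::shared_ptr conditionals = result.first;  // on 1 frontal
//   HessianFactor::shared_ptr remaining = result.second;       // marginal factor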
+
+/* ************************************************************************* */
+TEST(GaussianFactor, eliminate2 )
+{
+  // sigmas
+  double sigma1 = 0.2;
+  double sigma2 = 0.1;
+  Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
+
+  // the combined linear factor
+  Matrix Ax2 = Matrix_(4,2,
+      // x2
+      -1., 0.,
+      +0.,-1.,
+      1., 0.,
+      +0.,1.
+  );
+
+  Matrix Al1x1 = Matrix_(4,4,
+      // l1   x1
+      1., 0., 0.00, 0., // f4
+      0., 1., 0.00, 0., // f4
+      0., 0., -1.,  0., // f2
+      0., 0., 0.00,-1.  // f2
+  );
+
+  // the RHS
+  Vector b2(4);
+  b2(0) = -0.2;
+  b2(1) =  0.3;
+  b2(2) =  0.2;
+  b2(3) = -0.1;
+
+  vector<pair<Index, Matrix> > meas;
+  meas.push_back(make_pair(0, Ax2));
+  meas.push_back(make_pair(1, Al1x1));
+  JacobianFactor combined(meas, b2, sigmas);
+
+  // eliminate the combined factor
+  HessianFactor::shared_ptr combinedLF_Chol(new HessianFactor(combined));
+  FactorGraph<HessianFactor> combinedLFG_Chol;
+  combinedLFG_Chol.push_back(combinedLF_Chol);
+  pair<GaussianBayesNet::shared_ptr, HessianFactor::shared_ptr> actual_Chol =
+      HessianFactor::CombineAndEliminate(combinedLFG_Chol, 1);
+
+  // create expected Conditional Gaussian
+  double oldSigma = 0.0894427; // from when R was made unit
+  Matrix R11 = Matrix_(2,2,
+      1.00, 0.00,
+      0.00, 1.00
+  )/oldSigma;
+  Matrix S12 = Matrix_(2,4,
+      -0.20, 0.00,-0.80, 0.00,
+      +0.00,-0.20,+0.00,-0.80
+  )/oldSigma;
+  Vector d = Vector_(2,0.2,-0.14)/oldSigma;
+  GaussianConditional expectedCG(0,d,R11,1,S12,ones(2));
+  EXPECT(assert_equal(expectedCG,*actual_Chol.first->front(),1e-4));
+
+  // the expected linear factor
+  double sigma = 0.2236;
+  Matrix Bl1x1 = Matrix_(2,4,
+      // l1          x1
+      1.00, 0.00, -1.00,  0.00,
+      0.00, 1.00, +0.00, -1.00
+  )/sigma;
+  Vector b1 = Vector_(2,0.0,0.894427);
+  JacobianFactor expectedLF(1, Bl1x1, b1, repeat(2,1.0));
+  EXPECT(assert_equal(HessianFactor(expectedLF), *actual_Chol.second, 1.5e-3));
+}
+
+/* ************************************************************************* */
+int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+/* ************************************************************************* */
diff --git a/gtsam/linear/tests/timeFactorOverhead.cpp b/gtsam/linear/tests/timeFactorOverhead.cpp
index c25e069f1..57c222ef1 100644
--- a/gtsam/linear/tests/timeFactorOverhead.cpp
+++ b/gtsam/linear/tests/timeFactorOverhead.cpp
@@ -27,7 +27,7 @@ using namespace gtsam;
 using namespace std;
 
-typedef EliminationTree<GaussianFactor> GaussianEliminationTree;
+typedef EliminationTree<JacobianFactor> GaussianEliminationTree;
 
 static boost::variate_generator<boost::mt19937, boost::uniform_real<> > rg(boost::mt19937(), boost::uniform_real<>(0.0, 1.0));
 
@@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
       Vector b(blockdim);
       for(size_t j=0; j
[...]
diff --git a/gtsam/linear/tests/timeGaussianFactor.cpp b/gtsam/linear/tests/timeGaussianFactor.cpp
[...]
 #include  // for operator += in Ordering
 #include
-#include
+#include
+#include
 #include
+#include
 
 using namespace gtsam;
 using namespace boost::assign;
 
@@ -101,14 +103,14 @@ int main()
   b2(7) = -1;
 
   // time eliminate
-  GaussianFactor combined(_x2_, Ax2, _l1_, Al1, _x1_, Ax1, b2,sharedSigma(8,1));
+  JacobianFactor combined(_x2_, Ax2, _l1_, Al1, _x1_, Ax1, b2, sharedSigma(8,1));
   long timeLog = clock();
   int n = 1000000;
   GaussianConditional::shared_ptr conditional;
-  GaussianFactor::shared_ptr factor;
+  JacobianFactor::shared_ptr factor;
   for(int i = 0; i < n; i++)
-    conditional = GaussianFactor(combined).eliminateFirst();
+    conditional = JacobianFactor(combined).eliminateFirst();
   long timeLog2 = clock();
   double seconds = (double)(timeLog2-timeLog)/CLOCKS_PER_SEC;
diff --git a/gtsam/linear/tests/timeSLAMlike.cpp b/gtsam/linear/tests/timeSLAMlike.cpp
index 8cd074324..60213c52c 100644
--- a/gtsam/linear/tests/timeSLAMlike.cpp
+++ b/gtsam/linear/tests/timeSLAMlike.cpp
@@ -31,7 +31,7 @@ using namespace gtsam;
 using namespace std;
 using namespace boost::lambda;
 
-typedef EliminationTree<GaussianFactor> GaussianEliminationTree;
+typedef EliminationTree<JacobianFactor> GaussianEliminationTree;
 
 static boost::variate_generator<boost::mt19937, boost::uniform_real<> > rg(boost::mt19937(), boost::uniform_real<>(0.0, 1.0));
 
@@ -103,7 +103,7 @@ int main(int argc, char *argv[]) {
     for(size_t j=0; j
[...]
diff --git a/gtsam/nonlinear/NonlinearEquality.h b/gtsam/nonlinear/NonlinearEquality.h
[...]
-  virtual boost::shared_ptr<GaussianFactor> linearize(const VALUES& x, const Ordering& ordering) const {
+  virtual JacobianFactor::shared_ptr linearize(const VALUES& x, const Ordering& ordering) const {
     const T& xj = x[this->key_];
     Matrix A;
     Vector b = evaluateError(xj, A);
     // TODO pass unwhitened + noise model to Gaussian factor
     SharedDiagonal model = noiseModel::Constrained::All(b.size());
-    return GaussianFactor::shared_ptr(new GaussianFactor(ordering[this->key_], A, b, model));
+    return JacobianFactor::shared_ptr(new JacobianFactor(ordering[this->key_], A, b, model));
   }
 }; // NonlinearEquality
diff --git a/gtsam/nonlinear/NonlinearFactor.h b/gtsam/nonlinear/NonlinearFactor.h
index 85d13585a..93d57e3a3 100644
--- a/gtsam/nonlinear/NonlinearFactor.h
+++ b/gtsam/nonlinear/NonlinearFactor.h
@@ -31,6 +31,8 @@
 #include
 #include
 #include
+#include
+
 #include
 
 // FIXME: is this necessary? These don't even fit the right format
@@ -131,7 +133,7 @@ namespace gtsam {
   }
 
   /** linearize to a GaussianFactor */
-  virtual boost::shared_ptr<GaussianFactor>
+  virtual boost::shared_ptr<JacobianFactor>
   linearize(const VALUES& c, const Ordering& ordering) const = 0;
 
   /**
@@ -221,7 +223,7 @@ namespace gtsam {
    * Ax-b \approx h(x0+dx)-z = h(x0) + A*dx - z
    * Hence b = z - h(x0) = - error_vector(x)
   */
-  virtual boost::shared_ptr<GaussianFactor> linearize(const VALUES& x, const Ordering& ordering) const {
+  virtual boost::shared_ptr<JacobianFactor> linearize(const VALUES& x, const Ordering& ordering) const {
     const X& xj = x[key_];
     Matrix A;
     Vector b = - evaluateError(xj, A);
@@ -230,10 +232,10 @@ namespace gtsam {
     SharedDiagonal constrained =
         boost::shared_dynamic_cast<noiseModel::Constrained>(this->noiseModel_);
     if (constrained.get() != NULL)
-      return GaussianFactor::shared_ptr(new GaussianFactor(var, A, b, constrained));
+      return JacobianFactor::shared_ptr(new JacobianFactor(var, A, b, constrained));
     this->noiseModel_->WhitenInPlace(A);
     this->noiseModel_->whitenInPlace(b);
-    return GaussianFactor::shared_ptr(new GaussianFactor(var, A, b,
+    return JacobianFactor::shared_ptr(new JacobianFactor(var, A, b,
         noiseModel::Unit::Create(b.size())));
   }
 
@@ -333,7 +335,7 @@ namespace gtsam {
    * Ax-b \approx h(x1+dx1,x2+dx2)-z = h(x1,x2) + A2*dx1 + A2*dx2 - z
    * Hence b = z - h(x1,x2) = - error_vector(x)
   */
-  boost::shared_ptr<GaussianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
+  boost::shared_ptr<JacobianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
     const X1& x1 = c[key1_];
     const X2& x2 = c[key2_];
     Matrix A1, A2;
@@ -343,17 +345,17 @@ namespace gtsam {
     SharedDiagonal constrained =
         boost::shared_dynamic_cast<noiseModel::Constrained>(this->noiseModel_);
     if (constrained.get() != NULL) {
-      return GaussianFactor::shared_ptr(new GaussianFactor(var1, A1, var2,
+      return JacobianFactor::shared_ptr(new JacobianFactor(var1, A1, var2,
          A2, b, constrained));
     }
     this->noiseModel_->WhitenInPlace(A1);
     this->noiseModel_->WhitenInPlace(A2);
     this->noiseModel_->whitenInPlace(b);
     if(var1 < var2)
-      return GaussianFactor::shared_ptr(new GaussianFactor(var1, A1, var2,
+      return JacobianFactor::shared_ptr(new JacobianFactor(var1, A1, var2,
          A2, b, noiseModel::Unit::Create(b.size())));
     else
-      return GaussianFactor::shared_ptr(new GaussianFactor(var2, A2, var1,
+      return JacobianFactor::shared_ptr(new JacobianFactor(var2, A2, var1,
          A1, b, noiseModel::Unit::Create(b.size())));
   }
 
@@ -474,7 +476,7 @@ namespace gtsam {
    * Ax-b \approx h(x1+dx1,x2+dx2,x3+dx3)-z = h(x1,x2,x3) + A2*dx1 + A2*dx2 + A3*dx3 - z
    * Hence b = z - h(x1,x2,x3) = - error_vector(x)
   */
-  boost::shared_ptr<GaussianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
+  boost::shared_ptr<JacobianFactor> linearize(const VALUES& c, const Ordering& ordering) const {
     const X1& x1 = c[key1_];
     const X2& x2 = c[key2_];
     const X3& x3 = c[key3_];
@@ -485,34 +487,34 @@
     SharedDiagonal constrained =
         boost::shared_dynamic_cast<noiseModel::Constrained>(this->noiseModel_);
     if (constrained.get() != NULL) {
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var1, A1, var2, A2, var3, A3, b, constrained));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var1, A1, var2, A2, var3, A3, b, constrained));
     }
     this->noiseModel_->WhitenInPlace(A1);
     this->noiseModel_->WhitenInPlace(A2);
     this->noiseModel_->WhitenInPlace(A3);
     this->noiseModel_->whitenInPlace(b);
     if(var1 < var2 && var2 < var3)
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var1, A1, var2, A2, var3, A3, b, noiseModel::Unit::Create(b.size())));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var1, A1, var2, A2, var3, A3, b, noiseModel::Unit::Create(b.size())));
     else if(var2 < var1 && var1 < var3)
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var2, A2, var1, A1, var3, A3, b, noiseModel::Unit::Create(b.size())));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var2, A2, var1, A1, var3, A3, b, noiseModel::Unit::Create(b.size())));
     else if(var1 < var3 && var3 < var2)
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var1, A1, var3, A3, var2, A2, b, noiseModel::Unit::Create(b.size())));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var1, A1, var3, A3, var2, A2, b, noiseModel::Unit::Create(b.size())));
     else if(var2 < var3 && var3 < var1)
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var2, A2, var3, A3, var1, A1, b, noiseModel::Unit::Create(b.size())));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var2, A2, var3, A3, var1, A1, b, noiseModel::Unit::Create(b.size())));
    else if(var3 < var1 && var1 < var2)
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var3, A3, var1, A1, var2, A2, b, noiseModel::Unit::Create(b.size())));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var3, A3, var1, A1, var2, A2, b, noiseModel::Unit::Create(b.size())));
     else if(var3 < var2 && var2 < var1)
-      return GaussianFactor::shared_ptr(
-          new GaussianFactor(var3, A3, var2, A2, var1, A1, b, noiseModel::Unit::Create(b.size())));
+      return JacobianFactor::shared_ptr(
+          new JacobianFactor(var3, A3, var2, A2, var1, A1, b, noiseModel::Unit::Create(b.size())));
     else {
       assert(false);
-      return GaussianFactor::shared_ptr();
+      return JacobianFactor::shared_ptr();
     }
   }
diff --git a/gtsam/nonlinear/NonlinearFactorGraph-inl.h b/gtsam/nonlinear/NonlinearFactorGraph-inl.h
index bee25333d..8d4730b0e 100644
--- a/gtsam/nonlinear/NonlinearFactorGraph-inl.h
+++ b/gtsam/nonlinear/NonlinearFactorGraph-inl.h
@@ -120,16 +120,16 @@ void NonlinearFactorGraph<VALUES>::print(const std::string& str) const {
 
 /* ************************************************************************* */
 template<class VALUES>
-boost::shared_ptr<GaussianFactorGraph> NonlinearFactorGraph<VALUES>::linearize(
+typename FactorGraph<JacobianFactor>::shared_ptr NonlinearFactorGraph<VALUES>::linearize(
     const VALUES& config, const Ordering& ordering) const {
 
   // create an empty linear FG
-  GaussianFactorGraph::shared_ptr linearFG(new GaussianFactorGraph);
+  typename FactorGraph<JacobianFactor>::shared_ptr linearFG(new FactorGraph<JacobianFactor>);
   linearFG->reserve(this->size());
 
   // linearize all factors
   BOOST_FOREACH(const sharedFactor& factor, this->factors_) {
-    boost::shared_ptr<GaussianFactor> lf = factor->linearize(config, ordering);
+    JacobianFactor::shared_ptr lf = factor->linearize(config, ordering);
     if (lf) linearFG->push_back(lf);
   }
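// --- Illustrative sketch (not part of the patch): consuming the new linearize
// return type. `graph`, `values` and `ordering` are placeholders; the
// dynamicCastFactors target follows the NonlinearISAM usage below.
//
//   boost::shared_ptr<FactorGraph<JacobianFactor> > linear =
//       graph.linearize(values, ordering);
//   boost::shared_ptr<GaussianFactorGraph> gfg =
//       linear->dynamicCastFactors<GaussianFactorGraph>();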
diff --git a/gtsam/nonlinear/NonlinearFactorGraph.h b/gtsam/nonlinear/NonlinearFactorGraph.h
index fcefbae9b..1ed7ff60a 100644
--- a/gtsam/nonlinear/NonlinearFactorGraph.h
+++ b/gtsam/nonlinear/NonlinearFactorGraph.h
@@ -93,7 +93,7 @@ namespace gtsam {
   /**
    * linearize a nonlinear factor graph
    */
-  boost::shared_ptr<GaussianFactorGraph>
+  boost::shared_ptr<FactorGraph<JacobianFactor> >
   linearize(const VALUES& config, const Ordering& ordering) const;
 
diff --git a/gtsam/nonlinear/NonlinearISAM-inl.h b/gtsam/nonlinear/NonlinearISAM-inl.h
index 4d6ef59a9..607bb1803 100644
--- a/gtsam/nonlinear/NonlinearISAM-inl.h
+++ b/gtsam/nonlinear/NonlinearISAM-inl.h
@@ -51,7 +51,8 @@ void NonlinearISAM<Values>::update(const Factors& newFactors, const Values& init
     }
   }
 
-  boost::shared_ptr<GaussianFactorGraph> linearizedNewFactors(newFactors.linearize(linPoint_, ordering_));
+  boost::shared_ptr<GaussianFactorGraph> linearizedNewFactors(
+      newFactors.linearize(linPoint_, ordering_)->template dynamicCastFactors<GaussianFactorGraph>());
 
   // Update ISAM
   isam_.update(*linearizedNewFactors);
@@ -73,7 +74,8 @@ void NonlinearISAM<Values>::reorder_relinearize() {
     ordering_ = *factors_.orderingCOLAMD(newLinPoint);
 
     // Create a linear factor graph at the new linearization point
-    boost::shared_ptr<GaussianFactorGraph> gfg(factors_.linearize(newLinPoint, ordering_));
+    boost::shared_ptr<GaussianFactorGraph> gfg(
+        factors_.linearize(newLinPoint, ordering_)->template dynamicCastFactors<GaussianFactorGraph>());
 
     // Just recreate the whole BayesTree
     isam_.update(*gfg);
diff --git a/gtsam/nonlinear/NonlinearOptimizer-inl.h b/gtsam/nonlinear/NonlinearOptimizer-inl.h
index b1e1132f7..0b47dee17 100644
--- a/gtsam/nonlinear/NonlinearOptimizer-inl.h
+++ b/gtsam/nonlinear/NonlinearOptimizer-inl.h
@@ -71,7 +71,7 @@ namespace gtsam {
   /* ************************************************************************* */
   template
   VectorValues NonlinearOptimizer::linearizeAndOptimizeForDelta() const {
-    boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
+    boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
     // NonlinearOptimizer prepared(graph_, values_, ordering_, error_, lambda_);
     return *S(*linearized).optimize();
   }
@@ -84,7 +84,7 @@ namespace gtsam {
   NonlinearOptimizer NonlinearOptimizer::iterate() const {
 
     Parameters::verbosityLevel verbosity = parameters_->verbosity_ ;
-    boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_);
+    boost::shared_ptr<L> linearized = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
     shared_solver newSolver = solver_;
     if(newSolver) newSolver->replaceFactors(linearized);
@@ -167,7 +167,7 @@ namespace gtsam {
       Matrix A = eye(dim);
       Vector b = zero(dim);
       SharedDiagonal model = noiseModel::Isotropic::Sigma(dim,sigma);
-      GaussianFactor::shared_ptr prior(new GaussianFactor(j, A, b, model));
+      GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
       damped->push_back(prior);
     }
   }
@@ -225,7 +225,7 @@ namespace gtsam {
   if (verbosity >= Parameters::LAMBDA)
     cout << "lambda = " << lambda << endl;
 
   // linearize all factors once
-  boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_);
+  boost::shared_ptr<L> linear = graph_->linearize(*values_, *ordering_)->template dynamicCastFactors<L>();
 
   if (verbosity >= Parameters::LINEAR)
     linear->print("linear");
diff --git a/gtsam/slam/smallExample.cpp b/gtsam/slam/smallExample.cpp
index cf31b9ae0..26bac00fc 100644
--- a/gtsam/slam/smallExample.cpp
+++ b/gtsam/slam/smallExample.cpp
@@ -134,7 +134,7 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  GaussianFactorGraph createGaussianFactorGraph(const Ordering& ordering) {
+  FactorGraph<JacobianFactor> createGaussianFactorGraph(const Ordering& ordering) {
     Matrix I = eye(2);
 
     // Create empty graph
@@ -273,7 +273,7 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  pair<GaussianFactorGraph, Ordering> createSmoother(int T, boost::optional<Ordering> ordering) {
+  pair<FactorGraph<JacobianFactor>, Ordering> createSmoother(int T, boost::optional<Ordering> ordering) {
     Graph nlfg;
     Values poses;
     boost::tie(nlfg, poses) = createNonlinearSmoother(T);
@@ -283,14 +283,14 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  GaussianFactorGraph createSimpleConstraintGraph() {
+  FactorGraph<JacobianFactor> createSimpleConstraintGraph() {
     // create unary factor
     // prior on _x_, mean = [1,-1], sigma=0.1
     Matrix Ax = eye(2);
     Vector b1(2);
    b1(0) = 1.0;
     b1(1) = -1.0;
-    GaussianFactor::shared_ptr f1(new GaussianFactor(_x_, Ax, b1, sigma0_1));
+    GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
 
     // create binary constraint factor
     // between _x_ and _y_, that is going to be the only factor on _y_
@@ -299,11 +299,11 @@ namespace example {
     Matrix Ax1 = eye(2);
     Matrix Ay1 = eye(2) * -1;
     Vector b2 = Vector_(2, 0.0, 0.0);
-    GaussianFactor::shared_ptr f2(new GaussianFactor(_x_, Ax1, _y_, Ay1, b2,
+    GaussianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
         constraintModel));
 
     // construct the graph
-    GaussianFactorGraph fg;
+    FactorGraph<JacobianFactor> fg;
     fg.push_back(f1);
     fg.push_back(f2);
@@ -320,15 +320,15 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  GaussianFactorGraph createSingleConstraintGraph() {
+  FactorGraph<JacobianFactor> createSingleConstraintGraph() {
     // create unary factor
     // prior on _x_, mean = [1,-1], sigma=0.1
     Matrix Ax = eye(2);
     Vector b1(2);
     b1(0) = 1.0;
     b1(1) = -1.0;
-    //GaussianFactor::shared_ptr f1(new GaussianFactor(_x_, sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
-    GaussianFactor::shared_ptr f1(new GaussianFactor(_x_, Ax, b1, sigma0_1));
+    //GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
+    GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
 
     // create binary constraint factor
     // between _x_ and _y_, that is going to be the only factor on _y_
@@ -341,11 +341,11 @@ namespace example {
     Ax1(1, 1) = 1.0;
     Matrix Ay1 = eye(2) * 10;
     Vector b2 = Vector_(2, 1.0, 2.0);
-    GaussianFactor::shared_ptr f2(new GaussianFactor(_x_, Ax1, _y_, Ay1, b2,
+    GaussianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
         constraintModel));
 
     // construct the graph
-    GaussianFactorGraph fg;
+    FactorGraph<JacobianFactor> fg;
     fg.push_back(f1);
     fg.push_back(f2);
@@ -361,11 +361,11 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  GaussianFactorGraph createMultiConstraintGraph() {
+  FactorGraph<JacobianFactor> createMultiConstraintGraph() {
     // unary factor 1
     Matrix A = eye(2);
     Vector b = Vector_(2, -2.0, 2.0);
-    GaussianFactor::shared_ptr lf1(new GaussianFactor(_x_, A, b, sigma0_1));
+    GaussianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, sigma0_1));
 
     // constraint 1
     Matrix A11(2, 2);
@@ -383,7 +383,7 @@ namespace example {
     Vector b1(2);
     b1(0) = 1.0;
     b1(1) = 2.0;
-    GaussianFactor::shared_ptr lc1(new GaussianFactor(_x_, A11, _y_, A12, b1,
+    GaussianFactor::shared_ptr lc1(new JacobianFactor(_x_, A11, _y_, A12, b1,
         constraintModel));
 
     // constraint 2
@@ -402,11 +402,11 @@ namespace example {
     Vector b2(2);
     b2(0) = 3.0;
     b2(1) = 4.0;
-    GaussianFactor::shared_ptr lc2(new GaussianFactor(_x_, A21, _z_, A22, b2,
+    GaussianFactor::shared_ptr lc2(new JacobianFactor(_x_, A21, _z_, A22, b2,
         constraintModel));
 
     // construct the graph
-    GaussianFactorGraph fg;
+    FactorGraph<JacobianFactor> fg;
     fg.push_back(lf1);
     fg.push_back(lc1);
     fg.push_back(lc2);
@@ -431,7 +431,7 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  boost::tuple<GaussianFactorGraph, Ordering, VectorValues> planarGraph(size_t N) {
+  boost::tuple<FactorGraph<JacobianFactor>, Ordering, VectorValues> planarGraph(size_t N) {
 
     // create empty graph
     NonlinearFactorGraph nlfg;
@@ -481,9 +481,9 @@ namespace example {
   }
 
   /* ************************************************************************* */
-  pair<GaussianFactorGraph, GaussianFactorGraph> splitOffPlanarTree(size_t N,
-      const GaussianFactorGraph& original) {
-    GaussianFactorGraph T, C;
+  pair<FactorGraph<JacobianFactor>, FactorGraph<JacobianFactor> > splitOffPlanarTree(size_t N,
+      const FactorGraph<JacobianFactor>& original) {
+    FactorGraph<JacobianFactor> T, C;
 
     // Add the x11 constraint to the tree
     T.push_back(original[0]);
diff --git a/gtsam/slam/smallExample.h b/gtsam/slam/smallExample.h
index 37ea65e9a..b88c9a442 100644
--- a/gtsam/slam/smallExample.h
+++ b/gtsam/slam/smallExample.h
@@ -69,7 +69,7 @@ namespace gtsam {
   /**
    * create a linear factor graph
    * The non-linear graph above evaluated at NoisyValues
   */
-  GaussianFactorGraph createGaussianFactorGraph(const Ordering& ordering);
+  FactorGraph<JacobianFactor> createGaussianFactorGraph(const Ordering& ordering);
 
   /**
    * create small Chordal Bayes Net x <- y
@@ -93,7 +93,7 @@ namespace gtsam {
   /**
    * Create a Kalman smoother by linearizing a non-linear factor graph
   * @param T number of time-steps
   */
-  std::pair<GaussianFactorGraph, Ordering> createSmoother(int T, boost::optional<Ordering> ordering = boost::none);
+  std::pair<FactorGraph<JacobianFactor>, Ordering> createSmoother(int T, boost::optional<Ordering> ordering = boost::none);
 
   /* ******************************************************* */
   // Linear Constrained Examples
   /* ******************************************************* */
 
   /**
    * Creates a simple constrained graph with one linear factor and
    * one binary equality constraint that sets x = y
   */
-  GaussianFactorGraph createSimpleConstraintGraph();
+  FactorGraph<JacobianFactor> createSimpleConstraintGraph();
   VectorValues createSimpleConstraintValues();
 
   /**
    * Creates a simple constrained graph with one linear factor and
    * one binary constraint.
   */
-  GaussianFactorGraph createSingleConstraintGraph();
+  FactorGraph<JacobianFactor> createSingleConstraintGraph();
   VectorValues createSingleConstraintValues();
 
   /**
    * Creates a constrained graph with a linear factor and two
    * binary constraints that share a node
   */
-  GaussianFactorGraph createMultiConstraintGraph();
+  FactorGraph<JacobianFactor> createMultiConstraintGraph();
   VectorValues createMultiConstraintValues();
 
   /* ******************************************************* */
@@ -133,7 +133,7 @@ namespace gtsam {
    * -x11-x21-x31
    * with x11 clamped at (1,1), and others related by 2D odometry.
   */
-  boost::tuple<GaussianFactorGraph, Ordering, VectorValues> planarGraph(size_t N);
+  boost::tuple<FactorGraph<JacobianFactor>, Ordering, VectorValues> planarGraph(size_t N);
 
   /*
    * Create canonical ordering for planar graph that also works for tree
@@ -150,8 +150,8 @@ namespace gtsam {
    * |
    * -x11-x21-x31
   */
-  std::pair<GaussianFactorGraph, GaussianFactorGraph> splitOffPlanarTree(
-      size_t N, const GaussianFactorGraph& original);
+  std::pair<FactorGraph<JacobianFactor>, FactorGraph<JacobianFactor> > splitOffPlanarTree(
+      size_t N, const FactorGraph<JacobianFactor>& original);
 
 } // example
 } // gtsam
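// --- Illustrative sketch (not part of the patch): consuming the smallExample
// helpers after this change; names follow the tests further below.
//
//   Ordering ordering; ordering += "x1","x2","l1";
//   FactorGraph<JacobianFactor> fg = example::createGaussianFactorGraph(ordering);
//   JacobianFactor::shared_ptr f2 = fg[1];                    // factors are Jacobians
//   double err = gaussianError(fg, createZeroDelta(ordering));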
diff --git a/gtsam/slam/tests/testPose2Factor.cpp b/gtsam/slam/tests/testPose2Factor.cpp
index fd76e5485..0fd161350 100644
--- a/gtsam/slam/tests/testPose2Factor.cpp
+++ b/gtsam/slam/tests/testPose2Factor.cpp
@@ -52,7 +52,7 @@ TEST( Pose2Factor, error )
 
   // Actual linearization
   Ordering ordering(*x0.orderingArbitrary());
-  boost::shared_ptr<GaussianFactor> linear = factor.linearize(x0, ordering);
+  boost::shared_ptr<JacobianFactor> linear = factor.linearize(x0, ordering);
 
   // Check error at x0, i.e. delta = zero !
   VectorValues delta(x0.dims(ordering));
@@ -60,7 +60,7 @@ TEST( Pose2Factor, error )
   delta[ordering["x2"]] = zero(3);
   Vector error_at_zero = Vector_(3,0.0,0.0,0.0);
   CHECK(assert_equal(error_at_zero,factor.unwhitenedError(x0)));
-  CHECK(assert_equal(-error_at_zero,linear->error_vector(delta)));
+  CHECK(assert_equal(-error_at_zero, linear->error_vector(delta)));
 
   // Check error after increasing p2
   VectorValues plus = delta;
@@ -88,7 +88,7 @@ TEST( Pose2Factor, rhs )
 
   // Actual linearization
   Ordering ordering(*x0.orderingArbitrary());
-  boost::shared_ptr<GaussianFactor> linear = factor.linearize(x0, ordering);
+  boost::shared_ptr<JacobianFactor> linear = factor.linearize(x0, ordering);
 
   // Check RHS
   Pose2 hx0 = p1.between(p2);
@@ -131,10 +131,10 @@ TEST( Pose2Factor, linearize )
   // expected linear factor
   Ordering ordering(*x0.orderingArbitrary());
   SharedDiagonal probModel1 = noiseModel::Unit::Create(3);
-  GaussianFactor expected(ordering["x1"], expectedH1, ordering["x2"], expectedH2, expected_b, probModel1);
+  JacobianFactor expected(ordering["x1"], expectedH1, ordering["x2"], expectedH2, expected_b, probModel1);
 
   // Actual linearization
-  boost::shared_ptr<GaussianFactor> actual = factor.linearize(x0, ordering);
+  boost::shared_ptr<JacobianFactor> actual = factor.linearize(x0, ordering);
   CHECK(assert_equal(expected,*actual));
 
   // Numerical do not work out because BetweenFactor is approximate ?
diff --git a/gtsam/slam/tests/testPose2Prior.cpp b/gtsam/slam/tests/testPose2Prior.cpp
index 50b7aee90..fff6208d2 100644
--- a/gtsam/slam/tests/testPose2Prior.cpp
+++ b/gtsam/slam/tests/testPose2Prior.cpp
@@ -44,7 +44,7 @@ TEST( Pose2Prior, error )
 
   // Actual linearization
   Ordering ordering(*x0.orderingArbitrary());
-  boost::shared_ptr<GaussianFactor> linear = factor.linearize(x0, ordering);
+  boost::shared_ptr<JacobianFactor> linear = factor.linearize(x0, ordering);
 
   // Check error at x0, i.e. delta = zero !
   VectorValues delta(x0.dims(ordering));
@@ -86,7 +86,7 @@ TEST( Pose2Prior, linearize )
 
   // Actual linearization
   Ordering ordering(*x0.orderingArbitrary());
-  boost::shared_ptr<GaussianFactor> actual = factor.linearize(x0, ordering);
+  boost::shared_ptr<JacobianFactor> actual = factor.linearize(x0, ordering);
 
   // Test with numerical derivative
   Matrix numericalH = numericalDerivative11(h, prior, 1e-5);
diff --git a/gtsam/slam/tests/testPose2SLAM.cpp b/gtsam/slam/tests/testPose2SLAM.cpp
index b8ccf4456..f7b3419b3 100644
--- a/gtsam/slam/tests/testPose2SLAM.cpp
+++ b/gtsam/slam/tests/testPose2SLAM.cpp
@@ -74,11 +74,11 @@ TEST( Pose2Graph, linearization )
   config.insert(2,p2);
   // Linearize
   Ordering ordering(*config.orderingArbitrary());
-  boost::shared_ptr<GaussianFactorGraph> lfg_linearized = graph.linearize(config, ordering);
+  boost::shared_ptr<FactorGraph<JacobianFactor> > lfg_linearized = graph.linearize(config, ordering);
   //lfg_linearized->print("lfg_actual");
 
   // the expected linear factor
-  GaussianFactorGraph lfg_expected;
+  FactorGraph<JacobianFactor> lfg_expected;
   Matrix A1 = Matrix_(3,3,
       0.0,-2.0, -4.2,
       2.0, 0.0, -4.2,
@@ -91,7 +91,7 @@ TEST( Pose2Graph, linearization )
 
   Vector b = Vector_(3,-0.1/sx,0.1/sy,0.0);
   SharedDiagonal probModel1 = noiseModel::Unit::Create(3);
-  lfg_expected.add(ordering["x1"], A1, ordering["x2"], A2, b, probModel1);
+  lfg_expected.push_back(JacobianFactor::shared_ptr(new JacobianFactor(ordering["x1"], A1, ordering["x2"], A2, b, probModel1)));
 
   CHECK(assert_equal(lfg_expected, *lfg_linearized));
 }
diff --git a/gtsam/slam/tests/testVSLAMFactor.cpp b/gtsam/slam/tests/testVSLAMFactor.cpp
index 82abd4411..fdea48ad0 100644
--- a/gtsam/slam/tests/testVSLAMFactor.cpp
+++ b/gtsam/slam/tests/testVSLAMFactor.cpp
@@ -17,6 +17,7 @@
 
 #define GTSAM_MAGIC_KEY
 
+#include
 #include
 #include
 #include
@@ -65,16 +66,16 @@ TEST( ProjectionFactor, error )
   Matrix Al1 = Matrix_(2, 3, 61.584, 0., 0., 0., 61.584, 0.);
   Vector b = Vector_(2,3.,0.);
   SharedDiagonal probModel1 = noiseModel::Unit::Create(2);
-  GaussianFactor expected(ordering["x1"], Ax1, ordering["l1"], Al1, b, probModel1);
-  GaussianFactor::shared_ptr actual = factor->linearize(config, ordering);
+  JacobianFactor expected(ordering["x1"], Ax1, ordering["l1"], Al1, b, probModel1);
+  JacobianFactor::shared_ptr actual = factor->linearize(config, ordering);
   CHECK(assert_equal(expected,*actual,1e-3));
 
   // linearize graph
   Graph graph;
   graph.push_back(factor);
-  GaussianFactorGraph expected_lfg;
+  FactorGraph<JacobianFactor> expected_lfg;
   expected_lfg.push_back(actual);
-  boost::shared_ptr<GaussianFactorGraph> actual_lfg = graph.linearize(config, ordering);
+  boost::shared_ptr<FactorGraph<JacobianFactor> > actual_lfg = graph.linearize(config, ordering);
   CHECK(assert_equal(expected_lfg,*actual_lfg));
 
   // expmap on a config
diff --git a/tests/testGaussianFactor.cpp b/tests/testGaussianFactor.cpp
index e947b6707..e0e5561af 100644
--- a/tests/testGaussianFactor.cpp
+++ b/tests/testGaussianFactor.cpp
@@ -49,13 +49,13 @@ TEST( GaussianFactor, linearFactor )
   Matrix I = eye(2);
   Vector b = Vector_(2, 2.0, -1.0);
-  GaussianFactor expected(ordering["x1"], -10*I,ordering["x2"], 10*I, b, noiseModel::Unit::Create(2));
+  JacobianFactor expected(ordering["x1"], -10*I,ordering["x2"], 10*I, b, noiseModel::Unit::Create(2));
 
   // create a small linear factor graph
-  GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
+  FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
 
   // get the factor "f2" from the factor graph
-  GaussianFactor::shared_ptr lf = fg[1];
+  JacobianFactor::shared_ptr lf = fg[1];
 
   // check if the two factors are the same
   CHECK(assert_equal(expected,*lf));
@@ -225,7 +225,7 @@ TEST( GaussianFactor, matrix )
 {
   // create a small linear factor graph
   Ordering ordering; ordering += "x1","x2","l1";
-  GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
+  FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
 
   // get the factor "f2" from the factor graph
   //GaussianFactor::shared_ptr lf = fg[1]; // NOTE: using the older version
@@ -234,7 +234,7 @@ TEST( GaussianFactor, matrix )
 
   // render with a given ordering
   Ordering ord; ord += "x1","x2";
-  GaussianFactor::shared_ptr lf(new GaussianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
+  JacobianFactor::shared_ptr lf(new JacobianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
 
   // Test whitened version
   Matrix A_act1; Vector b_act1;
@@ -273,7 +273,7 @@ TEST( GaussianFactor, matrix_aug )
 {
   // create a small linear factor graph
   Ordering ordering; ordering += "x1","x2","l1";
-  GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
+  FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
 
   // get the factor "f2" from the factor graph
   //GaussianFactor::shared_ptr lf = fg[1];
@@ -282,7 +282,7 @@ TEST( GaussianFactor, matrix_aug )
 
   // render with a given ordering
   Ordering ord; ord += "x1","x2";
-  GaussianFactor::shared_ptr lf(new GaussianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
+  JacobianFactor::shared_ptr lf(new JacobianFactor(ord["x1"], -I, ord["x2"], I, b2, sigma0_1));
 
   // Test unwhitened version
diff --git a/tests/testGaussianFactorGraph.cpp b/tests/testGaussianFactorGraph.cpp
index 1a4cd9ccf..f9815c80c 100644
--- a/tests/testGaussianFactorGraph.cpp
+++ b/tests/testGaussianFactorGraph.cpp
@@ -58,13 +58,13 @@ TEST( GaussianFactorGraph, equals ){
 TEST( GaussianFactorGraph, error )
 {
   Ordering ordering; ordering += "x1","x2","l1";
-  GaussianFactorGraph fg = createGaussianFactorGraph(ordering);
+  FactorGraph<JacobianFactor> fg = createGaussianFactorGraph(ordering);
   VectorValues cfg = createZeroDelta(ordering);
 
   // note the error is the same as in testNonlinearFactorGraph as a
   // zero delta config in the linear graph is equivalent to noisy in
   // non-linear, which is really linear under the hood
-  double actual = fg.error(cfg);
+  double actual = gaussianError(fg, cfg);
   DOUBLES_EQUAL( 5.625, actual, 1e-9 );
 }
@@ -349,9 +349,9 @@ TEST( GaussianFactorGraph, eliminateAll )
 //  Matrix A = eye(2);
 //  Vector b = zero(2);
 //  SharedDiagonal sigma = sharedSigma(2,3.0);
-//  expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor(ordering["l1"],A,b,sigma)));
-//  expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor(ordering["x1"],A,b,sigma)));
-//  expected.push_back(GaussianFactor::shared_ptr(new GaussianFactor(ordering["x2"],A,b,sigma)));
+//  expected.push_back(GaussianFactor::shared_ptr(new JacobianFactor(ordering["l1"],A,b,sigma)));
+//  expected.push_back(GaussianFactor::shared_ptr(new JacobianFactor(ordering["x1"],A,b,sigma)));
+//  expected.push_back(GaussianFactor::shared_ptr(new JacobianFactor(ordering["x2"],A,b,sigma)));
 //  CHECK(assert_equal(expected,actual));
 //}
@@ -650,7 +650,7 @@ double error(const VectorValues& x) {
   Ordering ord; ord += "x2","l1","x1";
   GaussianFactorGraph fg = createGaussianFactorGraph(ord);
-  return fg.error(x);
+  return gaussianError(fg,x);
 }
 
 ///* ************************************************************************* */
@@ -899,13 +899,13 @@ TEST(GaussianFactorGraph, replace)
   Ordering ord; ord += "x1","x2","x3","x4","x5","x6";
   SharedDiagonal noise(sharedSigma(3, 1.0));
-  GaussianFactorGraph::sharedFactor f1(new GaussianFactor(
+  GaussianFactorGraph::sharedFactor f1(new JacobianFactor(
ord["x1"], eye(3,3), ord["x2"], eye(3,3), zero(3), noise)); - GaussianFactorGraph::sharedFactor f2(new GaussianFactor( + GaussianFactorGraph::sharedFactor f2(new JacobianFactor( ord["x2"], eye(3,3), ord["x3"], eye(3,3), zero(3), noise)); - GaussianFactorGraph::sharedFactor f3(new GaussianFactor( + GaussianFactorGraph::sharedFactor f3(new JacobianFactor( ord["x3"], eye(3,3), ord["x4"], eye(3,3), zero(3), noise)); - GaussianFactorGraph::sharedFactor f4(new GaussianFactor( + GaussianFactorGraph::sharedFactor f4(new JacobianFactor( ord["x5"], eye(3,3), ord["x6"], eye(3,3), zero(3), noise)); GaussianFactorGraph actual; @@ -970,9 +970,9 @@ TEST(GaussianFactorGraph, replace) // typedef GaussianFactorGraph::sharedFactor Factor; // SharedDiagonal model(Vector_(1, 0.5)); // GaussianFactorGraph fg; -// Factor factor1(new GaussianFactor("x1", Matrix_(1,1,1.), "x2", Matrix_(1,1,1.), Vector_(1,1.), model)); -// Factor factor2(new GaussianFactor("x2", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model)); -// Factor factor3(new GaussianFactor("x3", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model)); +// Factor factor1(new JacobianFactor("x1", Matrix_(1,1,1.), "x2", Matrix_(1,1,1.), Vector_(1,1.), model)); +// Factor factor2(new JacobianFactor("x2", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model)); +// Factor factor3(new JacobianFactor("x3", Matrix_(1,1,1.), "x3", Matrix_(1,1,1.), Vector_(1,1.), model)); // fg.push_back(factor1); // fg.push_back(factor2); // fg.push_back(factor3); @@ -989,7 +989,7 @@ TEST(GaussianFactorGraph, replace) // bn_expected.push_back(conditional2); // CHECK(assert_equal(bn_expected, bn)); // -// GaussianFactorGraph::sharedFactor factor_expected(new GaussianFactor("x3", Matrix_(1, 1, 2.), Vector_(1, 2.), SharedDiagonal(Vector_(1, 1.)))); +// GaussianFactorGraph::sharedFactor factor_expected(new JacobianFactor("x3", Matrix_(1, 1, 2.), Vector_(1, 2.), SharedDiagonal(Vector_(1, 1.)))); // GaussianFactorGraph fg_expected; // fg_expected.push_back(factor_expected); // CHECK(assert_equal(fg_expected, fg)); diff --git a/tests/testInference.cpp b/tests/testInference.cpp index d2f0b0557..4bcd2d56d 100644 --- a/tests/testInference.cpp +++ b/tests/testInference.cpp @@ -85,7 +85,7 @@ TEST( Inference, marginals2) init.insert(PointKey(0), Point2(1.0,1.0)); Ordering ordering(*fg.orderingCOLAMD(init)); - GaussianFactorGraph::shared_ptr gfg(fg.linearize(init, ordering)); + FactorGraph::shared_ptr gfg(fg.linearize(init, ordering)); GaussianMultifrontalSolver solver(*gfg); solver.marginalFactor(ordering[PointKey(0)]); } diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index 48c27a4a3..7d96dd7d9 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -49,8 +49,8 @@ TEST ( NonlinearEquality, linearization ) { // check linearize SharedDiagonal constraintModel = noiseModel::Constrained::All(3); - GaussianFactor expLF(0, eye(3), zero(3), constraintModel); - GaussianFactor::shared_ptr actualLF = nle->linearize(linearize, *linearize.orderingArbitrary()); + JacobianFactor expLF(0, eye(3), zero(3), constraintModel); + JacobianFactor::shared_ptr actualLF = nle->linearize(linearize, *linearize.orderingArbitrary()); EXPECT(assert_equal(*actualLF, expLF)); } @@ -174,7 +174,7 @@ TEST ( NonlinearEquality, allow_error_pose ) { Matrix A1 = eye(3,3); Vector b = expVec; SharedDiagonal model = noiseModel::Constrained::All(3); - GaussianFactor::shared_ptr expLinFactor(new GaussianFactor(0, A1, b, model)); + 
+  GaussianFactor::shared_ptr expLinFactor(new JacobianFactor(0, A1, b, model));
   EXPECT(assert_equal(*expLinFactor, *actLinFactor, 1e-5));
 }
diff --git a/tests/testNonlinearFactor.cpp b/tests/testNonlinearFactor.cpp
index c1e1e6c6a..b32defaed 100644
--- a/tests/testNonlinearFactor.cpp
+++ b/tests/testNonlinearFactor.cpp
@@ -200,12 +200,12 @@ TEST( NonlinearFactor, linearize_constraint1 )
 
   Values config;
   config.insert(simulated2D::PoseKey(1), Point2(1.0, 2.0));
-  GaussianFactor::shared_ptr actual = f0->linearize(config, *config.orderingArbitrary());
+  JacobianFactor::shared_ptr actual = f0->linearize(config, *config.orderingArbitrary());
 
   // create expected
   Ordering ord(*config.orderingArbitrary());
   Vector b = Vector_(2, 0., -3.);
-  GaussianFactor expected(ord["x1"], eye(2), b, constraint);
+  JacobianFactor expected(ord["x1"], eye(2), b, constraint);
   CHECK(assert_equal(expected, *actual));
 }
@@ -221,12 +221,12 @@ TEST( NonlinearFactor, linearize_constraint2 )
 
   Values config;
   config.insert(simulated2D::PoseKey(1), Point2(1.0, 2.0));
   config.insert(simulated2D::PointKey(1), Point2(5.0, 4.0));
-  GaussianFactor::shared_ptr actual = f0.linearize(config, *config.orderingArbitrary());
+  JacobianFactor::shared_ptr actual = f0.linearize(config, *config.orderingArbitrary());
 
   // create expected
   Ordering ord(*config.orderingArbitrary());
   Vector b = Vector_(2, -3., -3.);
-  GaussianFactor expected(ord["x1"], -1*eye(2), ord["l1"], eye(2), b, constraint);
+  JacobianFactor expected(ord["x1"], -1*eye(2), ord["l1"], eye(2), b, constraint);
   CHECK(assert_equal(expected, *actual));
 }
diff --git a/tests/testNonlinearFactorGraph.cpp b/tests/testNonlinearFactorGraph.cpp
index 0e8eca8eb..bd938a97b 100644
--- a/tests/testNonlinearFactorGraph.cpp
+++ b/tests/testNonlinearFactorGraph.cpp
@@ -96,8 +96,8 @@ TEST( Graph, linearize )
 {
   Graph fg = createNonlinearFactorGraph();
   Values initial = createNoisyValues();
-  boost::shared_ptr<GaussianFactorGraph> linearized = fg.linearize(initial, *initial.orderingArbitrary());
-  GaussianFactorGraph expected = createGaussianFactorGraph(*initial.orderingArbitrary());
+  boost::shared_ptr<FactorGraph<JacobianFactor> > linearized = fg.linearize(initial, *initial.orderingArbitrary());
+  FactorGraph<JacobianFactor> expected = createGaussianFactorGraph(*initial.orderingArbitrary());
   CHECK(assert_equal(expected,*linearized)); // Needs correct linearizations
 }
diff --git a/tests/timeMultifrontalOnDataset.cpp b/tests/timeMultifrontalOnDataset.cpp
index 0b329fdc6..9e892f699 100644
--- a/tests/timeMultifrontalOnDataset.cpp
+++ b/tests/timeMultifrontalOnDataset.cpp
@@ -38,21 +38,26 @@ int main(int argc, char *argv[]) {
   // Add a prior on the first pose
   data.first->addPrior(0, Pose2(), sharedSigma(Pose2::Dim(), 0.0005));
 
-  tic_("Z 1 order");
+  tic_(1, "order");
   Ordering::shared_ptr ordering(data.first->orderingCOLAMD(*data.second));
-  toc_("Z 1 order");
+  toc_(1, "order");
   tictoc_print_();
 
-  tic_("Z 2 linearize");
-  GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering));
-  toc_("Z 2 linearize");
+  tic_(2, "linearize");
+  GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering)->dynamicCastFactors<GaussianFactorGraph>());
+  toc_(2, "linearize");
   tictoc_print_();
 
   for(size_t trial = 0; trial < 10; ++trial) {
-    tic_("Z 3 solve");
-    VectorValues soln(*GaussianMultifrontalSolver(*gfg).optimize());
-    toc_("Z 3 solve");
+    tic_(3, "solve");
+    tic(1, "construct solver");
+    GaussianMultifrontalSolver solver(*gfg);
+    toc(1, "construct solver");
+    tic(2, "optimize");
+    VectorValues soln(*solver.optimize());
+    toc(2, "optimize");
+    toc_(3, "solve");
"solve"); tictoc_print_(); } diff --git a/tests/timeSequentialOnDataset.cpp b/tests/timeSequentialOnDataset.cpp index 79c32469c..117965736 100644 --- a/tests/timeSequentialOnDataset.cpp +++ b/tests/timeSequentialOnDataset.cpp @@ -38,21 +38,26 @@ int main(int argc, char *argv[]) { // Add a prior on the first pose data.first->addPrior(0, Pose2(), sharedSigma(Pose2::Dim(), 0.0005)); - tic_("Z 1 order"); + tic_(1, "order"); Ordering::shared_ptr ordering(data.first->orderingCOLAMD(*data.second)); - toc_("Z 1 order"); + toc_(1, "order"); tictoc_print_(); - tic_("Z 2 linearize"); - GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering)); - toc_("Z 2 linearize"); + tic_(2, "linearize"); + GaussianFactorGraph::shared_ptr gfg(data.first->linearize(*data.second, *ordering)->dynamicCastFactors()); + toc_(2, "linearize"); tictoc_print_(); for(size_t trial = 0; trial < 10; ++trial) { - tic_("Z 3 solve"); - VectorValues soln(*GaussianSequentialSolver(*gfg).optimize()); - toc_("Z 3 solve"); + tic_(3, "solve"); + tic(1, "construct solver"); + GaussianSequentialSolver solver(*gfg); + toc(1, "construct solver"); + tic(2, "optimize"); + VectorValues soln(*solver.optimize()); + toc(2, "optimize"); + toc_(3, "solve"); tictoc_print_(); }