New SymmetricBlockMatrix interface - should be safer, since all exposed functions use symmetric matrix expressions and the internal uninitialized values below the diagonal are never exposed.
parent 21297238ba
commit 5f87cbea7f
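A minimal usage sketch of the new interface, using only the accessors that appear in the diff below (the block-size constructor, full().triangularView(), selfadjointView().rankUpdate(), knownOffDiagonal(), and implicit conversion to Matrix); the block sizes and the sketch function itself are illustrative, not part of the commit.

#include <gtsam/base/SymmetricBlockMatrix.h>
#include <boost/assign/list_of.hpp>

using namespace gtsam;

// Illustrative sketch only; mirrors the accessors exercised in testSymmetricBlockMatrix.cpp below.
void symmetricBlockMatrixSketch()
{
  // Three blocks of sizes 2, 3 and 1 (the trailing scalar block is the usual augmented column).
  SymmetricBlockMatrix info(boost::assign::list_of(2)(3)(1));

  // Only the upper triangle is ever written.
  info.full().triangularView().setZero();

  // Diagonal blocks behave as self-adjoint views ...
  Matrix a = (Matrix(1, 3) << 2.0, 3.0, 4.0).finished();
  info(1, 1).selfadjointView().rankUpdate(a.transpose());

  // ... while off-diagonal blocks behave as plain (or transposed) blocks.
  Matrix b = (Matrix(1, 2) << 5.0, 6.0).finished();
  info(0, 1).knownOffDiagonal() += b.transpose() * a;

  // Reading below the diagonal transparently returns the transpose of the stored upper-triangular data.
  Matrix belowDiagonal = info(1, 0);
  (void)belowDiagonal;
}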
@ -18,6 +18,8 @@
#include <gtsam/base/SymmetricBlockMatrix.h>
#include <gtsam/base/VerticalBlockMatrix.h>
#include <gtsam/base/cholesky.h>
#include <gtsam/base/timing.h>

namespace gtsam {

@ -47,4 +49,30 @@ namespace gtsam {
    return result;
  }

  /* ************************************************************************* */
  VerticalBlockMatrix SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals)
  {
    // Do dense elimination
    if(blockStart() != 0)
      throw std::invalid_argument("Can only do Cholesky when the SymmetricBlockMatrix is not a restricted view, i.e. when blockStart() == 0.");
    if(!gtsam::choleskyPartial(matrix_, offset(nFrontals)))
      throw CholeskyFailed();

    // Split conditional

    // Create one big conditional with many frontal variables.
    gttic(Construct_conditional);
    const size_t varDim = offset(nFrontals);
    VerticalBlockMatrix Ab = VerticalBlockMatrix::LikeActiveViewOf(*this, varDim);
    Ab.full() = matrix_.topRows(varDim);
    Ab.full().triangularView<Eigen::StrictlyLower>().setZero();
    gttoc(Construct_conditional);

    gttic(Remaining_factor);
    // Take the lower-right block of this matrix to get the remaining factor
    blockStart() = nFrontals;
    gttoc(Remaining_factor);

    return Ab;
  }
}
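The intended calling pattern for the new choleskyPartial is the one EliminateCholesky adopts later in this commit; a hedged sketch of that pattern follows (the function name eliminateFrontalsSketch is illustrative only).

#include <gtsam/base/SymmetricBlockMatrix.h>
#include <gtsam/base/VerticalBlockMatrix.h>

using namespace gtsam;

// Illustrative sketch only: how a caller is expected to consume choleskyPartial.
VerticalBlockMatrix eliminateFrontalsSketch(SymmetricBlockMatrix& info, DenseIndex nFrontals)
{
  // Factor the leading nFrontals blocks in place. The returned VerticalBlockMatrix
  // holds the eliminated (conditional) rows; afterwards blockStart() == nFrontals,
  // so `info` itself now views only the remaining symmetric factor.
  // choleskyPartial throws CholeskyFailed if the matrix is not positive definite,
  // which the factor-graph code below translates into IndeterminantLinearSystemException.
  return info.choleskyPartial(nFrontals);
}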
@ -19,6 +19,7 @@
|
|||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
#include <gtsam/base/SymmetricBlockMatrixBlockExpr.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -36,12 +37,12 @@ namespace gtsam {
|
|||
* (except re-setting firstBlock()).
|
||||
*
|
||||
* @addtogroup base */
|
||||
class SymmetricBlockMatrix
|
||||
class GTSAM_EXPORT SymmetricBlockMatrix
|
||||
{
|
||||
public:
|
||||
typedef SymmetricBlockMatrix This;
|
||||
typedef Eigen::Block<Matrix> Block;
|
||||
typedef Eigen::Block<const Matrix> constBlock;
|
||||
typedef SymmetricBlockMatrixBlockExpr<This> Block;
|
||||
typedef SymmetricBlockMatrixBlockExpr<const This> constBlock;
|
||||
|
||||
protected:
|
||||
Matrix matrix_; ///< The full matrix
|
||||
|
@ -50,7 +51,7 @@ namespace gtsam {
|
|||
DenseIndex blockStart_; ///< Changes apparent matrix view, see main class comment.
|
||||
|
||||
public:
|
||||
/** Construct from an empty matrix (asserts that the matrix is empty) */
|
||||
/// Construct from an empty matrix (asserts that the matrix is empty)
|
||||
SymmetricBlockMatrix() :
|
||||
blockStart_(0)
|
||||
{
|
||||
|
@ -58,7 +59,7 @@ namespace gtsam {
|
|||
assertInvariants();
|
||||
}
|
||||
|
||||
/** Construct from a container of the sizes of each block. */
|
||||
/// Construct from a container of the sizes of each block.
|
||||
template<typename CONTAINER>
|
||||
SymmetricBlockMatrix(const CONTAINER& dimensions) :
|
||||
blockStart_(0)
|
||||
|
@ -68,8 +69,7 @@ namespace gtsam {
|
|||
assertInvariants();
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct from iterator over the sizes of each vertical block. */
|
||||
/// Construct from iterator over the sizes of each vertical block.
|
||||
template<typename ITERATOR>
|
||||
SymmetricBlockMatrix(ITERATOR firstBlockDim, ITERATOR lastBlockDim) :
|
||||
blockStart_(0)
|
||||
|
@ -79,11 +79,13 @@ namespace gtsam {
|
|||
assertInvariants();
|
||||
}
|
||||
|
||||
/** Construct from a container of the sizes of each vertical block and a pre-prepared matrix. */
|
||||
/// Construct from a container of the sizes of each vertical block and a pre-prepared matrix.
|
||||
template<typename CONTAINER>
|
||||
SymmetricBlockMatrix(const CONTAINER& dimensions, const Matrix& matrix) :
|
||||
matrix_(matrix), blockStart_(0)
|
||||
blockStart_(0)
|
||||
{
|
||||
matrix_.resize(matrix.rows(), matrix.cols());
|
||||
matrix_.triangularView<Eigen::Upper>() = matrix.triangularView<Eigen::Upper>();
|
||||
fillOffsets(dimensions.begin(), dimensions.end());
|
||||
if(matrix_.rows() != matrix_.cols())
|
||||
throw std::invalid_argument("Requested to create a SymmetricBlockMatrix from a non-square matrix.");
|
||||
|
@ -92,118 +94,195 @@ namespace gtsam {
|
|||
assertInvariants();
|
||||
}
|
||||
|
||||
/** Copy the block structure, but do not copy the matrix data. If blockStart() has been
|
||||
* modified, this copies the structure of the corresponding matrix view. In the destination
|
||||
* SymmetricBlockMatrix, blockStart() will be 0. */
|
||||
/// Copy the block structure, but do not copy the matrix data. If blockStart() has been
|
||||
/// modified, this copies the structure of the corresponding matrix view. In the destination
|
||||
/// SymmetricBlockMatrix, blockStart() will be 0.
|
||||
static SymmetricBlockMatrix LikeActiveViewOf(const SymmetricBlockMatrix& other);
|
||||
|
||||
/** Copy the block structure, but do not copy the matrix data. If blockStart() has been
|
||||
* modified, this copies the structure of the corresponding matrix view. In the destination
|
||||
* SymmetricBlockMatrix, blockStart() will be 0. */
|
||||
/// Copy the block structure, but do not copy the matrix data. If blockStart() has been
|
||||
/// modified, this copies the structure of the corresponding matrix view. In the destination
|
||||
/// SymmetricBlockMatrix, blockStart() will be 0.
|
||||
static SymmetricBlockMatrix LikeActiveViewOf(const VerticalBlockMatrix& other);
|
||||
|
||||
/** Row size */
|
||||
/// Row size
|
||||
DenseIndex rows() const { assertInvariants(); return variableColOffsets_.back() - variableColOffsets_[blockStart_]; }
|
||||
|
||||
/** Column size */
|
||||
/// Column size
|
||||
DenseIndex cols() const { return rows(); }
|
||||
|
||||
|
||||
/** Block count */
|
||||
/// Block count
|
||||
DenseIndex nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; }
|
||||
|
||||
/// Access the block with vertical block index \c i_block and horizontal block index \c j_block.
|
||||
/// Note that the actual block accessed in the underlying matrix is relative to blockStart().
|
||||
Block operator()(DenseIndex i_block, DenseIndex j_block) {
|
||||
return range(i_block, i_block+1, j_block, j_block+1);
|
||||
return Block(*this, i_block, j_block);
|
||||
}
|
||||
|
||||
/// Access the block with vertical block index \c i_block and horizontal block index \c j_block.
|
||||
/// Note that the actual block accessed in the underlying matrix is relative to blockStart().
|
||||
constBlock operator()(DenseIndex i_block, DenseIndex j_block) const {
|
||||
return range(i_block, i_block+1, j_block, j_block+1);
|
||||
return constBlock(*this, i_block, j_block);
|
||||
}
|
||||
|
||||
/// Access the range of blocks starting with vertical block index \c i_startBlock, ending with
|
||||
/// vertical block index \c i_endBlock, starting with horizontal block index \c j_startBlock,
|
||||
/// and ending with horizontal block index \c j_endBlock. End block indices are exclusive. Note
|
||||
/// that the actual blocks accessed in the underlying matrix are relative to blockStart().
|
||||
Block range(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) {
|
||||
assertInvariants();
|
||||
DenseIndex i_actualStartBlock = i_startBlock + blockStart_;
|
||||
DenseIndex i_actualEndBlock = i_endBlock + blockStart_;
|
||||
DenseIndex j_actualStartBlock = j_startBlock + blockStart_;
|
||||
DenseIndex j_actualEndBlock = j_endBlock + blockStart_;
|
||||
checkBlock(i_actualStartBlock);
|
||||
checkBlock(j_actualStartBlock);
|
||||
if(i_startBlock != 0 || i_endBlock != 0) {
|
||||
checkBlock(i_actualStartBlock);
|
||||
assert(i_actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
}
|
||||
if(j_startBlock != 0 || j_endBlock != 0) {
|
||||
checkBlock(j_actualStartBlock);
|
||||
assert(j_actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
}
|
||||
return matrix_.block(
|
||||
variableColOffsets_[i_actualStartBlock], variableColOffsets_[j_actualStartBlock],
|
||||
variableColOffsets_[i_actualEndBlock] - variableColOffsets_[i_actualStartBlock],
|
||||
variableColOffsets_[j_actualEndBlock] - variableColOffsets_[j_actualStartBlock]);
|
||||
return Block(*this, i_startBlock, j_startBlock, i_endBlock - i_startBlock, j_endBlock - j_startBlock);
|
||||
}
|
||||
|
||||
/// Access the range of blocks starting with vertical block index \c i_startBlock, ending with
|
||||
/// vertical block index \c i_endBlock, starting with horizontal block index \c j_startBlock,
|
||||
/// and ending with horizontal block index \c j_endBlock. End block indices are exclusive. Note
|
||||
/// that the actual blocks accessed in the underlying matrix are relative to blockStart().
|
||||
constBlock range(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) const {
|
||||
assertInvariants();
|
||||
DenseIndex i_actualStartBlock = i_startBlock + blockStart_;
|
||||
DenseIndex i_actualEndBlock = i_endBlock + blockStart_;
|
||||
DenseIndex j_actualStartBlock = j_startBlock + blockStart_;
|
||||
DenseIndex j_actualEndBlock = j_endBlock + blockStart_;
|
||||
if(i_startBlock != 0 || i_endBlock != 0) {
|
||||
checkBlock(i_actualStartBlock);
|
||||
assert(i_actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
}
|
||||
if(j_startBlock != 0 || j_endBlock != 0) {
|
||||
checkBlock(j_actualStartBlock);
|
||||
assert(j_actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
}
|
||||
return matrix_.block(
|
||||
variableColOffsets_[i_actualStartBlock], variableColOffsets_[j_actualStartBlock],
|
||||
variableColOffsets_[i_actualEndBlock] - variableColOffsets_[i_actualStartBlock],
|
||||
variableColOffsets_[j_actualEndBlock] - variableColOffsets_[j_actualStartBlock]);
|
||||
return constBlock(*this, i_startBlock, j_startBlock, i_endBlock - i_startBlock, j_endBlock - j_startBlock);
|
||||
}
|
||||
|
||||
/** Return the full matrix, *not* including any portions excluded by firstBlock(). */
|
||||
Block full() {
|
||||
return range(0,nBlocks(), 0,nBlocks());
|
||||
Block full()
|
||||
{
|
||||
return Block(*this, 0, nBlocks(), 0);
|
||||
}
|
||||
|
||||
/** Return the full matrix, *not* including any portions excluded by firstBlock(). */
|
||||
constBlock full() const {
|
||||
return range(0,nBlocks(), 0,nBlocks());
|
||||
constBlock full() const
|
||||
{
|
||||
return constBlock(*this, 0, nBlocks(), 0);
|
||||
}
|
||||
|
||||
/** Access to full matrix, including any portions excluded by firstBlock() to other operations. */
|
||||
const Matrix& matrix() const { return matrix_; }
|
||||
Eigen::SelfAdjointView<const Matrix, Eigen::Upper> matrix() const
|
||||
{
|
||||
return matrix_;
|
||||
}
|
||||
|
||||
/** Access to full matrix, including any portions excluded by firstBlock() to other operations. */
|
||||
Matrix& matrix() { return matrix_; }
|
||||
Eigen::SelfAdjointView<Matrix, Eigen::Upper> matrix()
|
||||
{
|
||||
return matrix_;
|
||||
}
|
||||
|
||||
DenseIndex offset(DenseIndex block) const {
|
||||
/// Return the absolute offset in the underlying matrix of the start of the specified \c block.
|
||||
DenseIndex offset(DenseIndex block) const
|
||||
{
|
||||
assertInvariants();
|
||||
DenseIndex actualBlock = block + blockStart_;
|
||||
checkBlock(actualBlock);
|
||||
return variableColOffsets_[actualBlock];
|
||||
}
|
||||
|
||||
/// Retrieve or modify the first logical block, i.e. the block referenced by block index 0.
|
||||
/// Blocks before it will be inaccessible, except by accessing the underlying matrix using
|
||||
/// matrix().
|
||||
DenseIndex& blockStart() { return blockStart_; }
|
||||
|
||||
/// Retrieve the first logical block, i.e. the block referenced by block index 0. Blocks before
|
||||
/// it will be inaccessible, except by accessing the underlying matrix using matrix().
|
||||
DenseIndex blockStart() const { return blockStart_; }
|
||||
|
||||
/// Do partial Cholesky in-place and return the eliminated block matrix, leaving the remaining
|
||||
/// symmetric matrix in place.
|
||||
VerticalBlockMatrix choleskyPartial(DenseIndex nFrontals);
|
||||
|
||||
protected:
|
||||
void assertInvariants() const {
|
||||
void assertInvariants() const
|
||||
{
|
||||
assert(matrix_.rows() == matrix_.cols());
|
||||
assert(matrix_.cols() == variableColOffsets_.back());
|
||||
assert(blockStart_ < (DenseIndex)variableColOffsets_.size());
|
||||
}
|
||||
|
||||
void checkBlock(DenseIndex block) const {
|
||||
void checkBlock(DenseIndex block) const
|
||||
{
|
||||
assert(matrix_.rows() == matrix_.cols());
|
||||
assert(matrix_.cols() == variableColOffsets_.back());
|
||||
assert(block >= 0);
|
||||
assert(block < (DenseIndex)variableColOffsets_.size()-1);
|
||||
assert(variableColOffsets_[block] < matrix_.cols() && variableColOffsets_[block+1] <= matrix_.cols());
|
||||
}
|
||||
|
||||
DenseIndex offsetUnchecked(DenseIndex block) const
|
||||
{
|
||||
return variableColOffsets_[block + blockStart_];
|
||||
}
|
||||
|
||||
//void checkRange(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) const
|
||||
//{
|
||||
// const DenseIndex i_actualStartBlock = i_startBlock + blockStart_;
|
||||
// const DenseIndex i_actualEndBlock = i_endBlock + blockStart_;
|
||||
// const DenseIndex j_actualStartBlock = j_startBlock + blockStart_;
|
||||
// const DenseIndex j_actualEndBlock = j_endBlock + blockStart_;
|
||||
// checkBlock(i_actualStartBlock);
|
||||
// checkBlock(j_actualStartBlock);
|
||||
// if(i_startBlock != 0 || i_endBlock != 0) {
|
||||
// checkBlock(i_actualStartBlock);
|
||||
// assert(i_actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
// }
|
||||
// if(j_startBlock != 0 || j_endBlock != 0) {
|
||||
// checkBlock(j_actualStartBlock);
|
||||
// assert(j_actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
// }
|
||||
//}
|
||||
|
||||
//void checkRange(DenseIndex startBlock, DenseIndex endBlock) const
|
||||
//{
|
||||
// const DenseIndex actualStartBlock = startBlock + blockStart_;
|
||||
// const DenseIndex actualEndBlock = endBlock + blockStart_;
|
||||
// checkBlock(actualStartBlock);
|
||||
// if(startBlock != 0 || endBlock != 0) {
|
||||
// checkBlock(actualStartBlock);
|
||||
// assert(actualEndBlock < (DenseIndex)variableColOffsets_.size());
|
||||
// }
|
||||
//}
|
||||
|
||||
//Block rangeUnchecked(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock)
|
||||
//{
|
||||
// const DenseIndex i_actualStartBlock = i_startBlock + blockStart_;
|
||||
// const DenseIndex i_actualEndBlock = i_endBlock + blockStart_;
|
||||
// const DenseIndex j_actualStartBlock = j_startBlock + blockStart_;
|
||||
// const DenseIndex j_actualEndBlock = j_endBlock + blockStart_;
|
||||
|
||||
// return Block(matrix(),
|
||||
// variableColOffsets_[i_actualStartBlock],
|
||||
// variableColOffsets_[j_actualStartBlock],
|
||||
// variableColOffsets_[i_actualEndBlock] - variableColOffsets_[i_actualStartBlock],
|
||||
// variableColOffsets_[j_actualEndBlock] - variableColOffsets_[j_actualStartBlock]);
|
||||
//}
|
||||
|
||||
//constBlock rangeUnchecked(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) const
|
||||
//{
|
||||
// // Convert Block to constBlock
|
||||
// const Block block = const_cast<This*>(this)->rangeUnchecked(i_startBlock, i_endBlock, j_startBlock, j_endBlock);
|
||||
// return constBlock(matrix(), block.Base::Base::, block.startCol(), block.rows(), block.cols());
|
||||
//}
|
||||
|
||||
//Block rangeUnchecked(DenseIndex startBlock, DenseIndex endBlock)
|
||||
//{
|
||||
// const DenseIndex actualStartBlock = startBlock + blockStart_;
|
||||
// const DenseIndex actualEndBlock = endBlock + blockStart_;
|
||||
|
||||
// return Block(matrix(),
|
||||
// variableColOffsets_[actualStartBlock],
|
||||
// variableColOffsets_[actualStartBlock],
|
||||
// variableColOffsets_[actualEndBlock] - variableColOffsets_[actualStartBlock],
|
||||
// variableColOffsets_[actualEndBlock] - variableColOffsets_[actualStartBlock]);
|
||||
//}
|
||||
|
||||
//constBlock rangeUnchecked(DenseIndex startBlock, DenseIndex endBlock) const
|
||||
//{
|
||||
// // Convert Block to constBlock
|
||||
// const Block block = const_cast<This*>(this)->rangeUnchecked(startBlock, endBlock);
|
||||
// return constBlock(matrix(), block.startRow(), block.startCol(), block.rows(), block.cols());
|
||||
//}
|
||||
|
||||
template<typename ITERATOR>
|
||||
void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim) {
|
||||
void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim)
|
||||
{
|
||||
variableColOffsets_.resize((lastBlockDim-firstBlockDim)+1);
|
||||
variableColOffsets_[0] = 0;
|
||||
DenseIndex j=0;
|
||||
|
@ -214,6 +293,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
friend class VerticalBlockMatrix;
|
||||
template<typename SymmetricBlockMatrixType> friend class SymmetricBlockMatrixBlockExpr;
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
|
@ -226,5 +306,13 @@ namespace gtsam {
|
|||
}
|
||||
};
|
||||
|
||||
/* ************************************************************************* */
|
||||
class CholeskyFailed : public gtsam::ThreadsafeException<CholeskyFailed>
|
||||
{
|
||||
public:
|
||||
CholeskyFailed() throw() {}
|
||||
virtual ~CholeskyFailed() throw() {}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
|
|
|
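Since several of the declarations above (rows(), nBlocks(), offset(), choleskyPartial()) are defined relative to blockStart(), a small sketch of that mechanism may help; the block sizes are illustrative and follow the test file added later in this commit.

#include <gtsam/base/SymmetricBlockMatrix.h>
#include <boost/assign/list_of.hpp>
#include <cassert>

// Illustrative sketch only: blockStart() re-indexes the logical blocks without copying data.
void blockStartSketch()
{
  using namespace gtsam;
  SymmetricBlockMatrix info(boost::assign::list_of(2)(3)(1));
  assert(info.nBlocks() == 3 && info.rows() == 6);

  // Point the logical origin at the second block; nothing is copied.
  info.blockStart() = 1;
  assert(info.nBlocks() == 2 && info.rows() == 4);

  // Block (0, 0) now refers to the 3x3 block that used to be (1, 1); the excluded
  // leading rows/columns stay reachable only through matrix().
}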
@ -0,0 +1,337 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SymmetricBlockMatrixBlockExpr.h
|
||||
* @brief Matrix expression for a block of a SymmetricBlockMatrix
|
||||
* @author Richard Roberts
|
||||
* @date Nov 20, 2013
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
|
||||
namespace gtsam { template<typename SymmetricBlockMatrixType> class SymmetricBlockMatrixBlockExpr; }
|
||||
namespace gtsam { class SymmetricBlockMatrix; }
|
||||
|
||||
// traits class for Eigen expressions
|
||||
namespace Eigen
|
||||
{
|
||||
namespace internal
|
||||
{
|
||||
template<typename SymmetricBlockMatrixType>
|
||||
struct traits<gtsam::SymmetricBlockMatrixBlockExpr<SymmetricBlockMatrixType> > :
|
||||
public traits<typename gtsam::const_selector<
|
||||
SymmetricBlockMatrixType, gtsam::SymmetricBlockMatrix, gtsam::Matrix, const gtsam::Matrix>::type>
|
||||
{
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
namespace gtsam
|
||||
{
|
||||
/// A matrix expression that references a single block of a SymmetricBlockMatrix. Depending on
|
||||
/// the position of the block, this expression will behave either as a regular matrix block, a
|
||||
/// transposed matrix block, or a symmetric matrix block. The only reason this class is templated
|
||||
/// on SymmetricBlockMatrixType is to allow for both const and non-const references.
|
||||
template<typename SymmetricBlockMatrixType>
|
||||
class SymmetricBlockMatrixBlockExpr : public Eigen::EigenBase<SymmetricBlockMatrixBlockExpr<SymmetricBlockMatrixType> >
|
||||
{
|
||||
protected:
|
||||
SymmetricBlockMatrixType& xpr_; ///< The referenced SymmetricBlockMatrix
|
||||
DenseIndex densei_; ///< The scalar indices of the referenced block
|
||||
DenseIndex densej_; ///< The scalar indices of the referenced block
|
||||
DenseIndex denseRows_; ///< The scalar size of the referenced block
|
||||
DenseIndex denseCols_; ///< The scalar size of the referenced block
|
||||
enum BlockType { Plain, SelfAdjoint, Transposed } blockType_; ///< The type of the referenced block, as determined by the block position
|
||||
typedef SymmetricBlockMatrixBlockExpr<SymmetricBlockMatrixType> This;
|
||||
|
||||
public:
|
||||
// Typedefs and constants used in Eigen
|
||||
typedef typename const_selector<SymmetricBlockMatrixType, SymmetricBlockMatrix,
|
||||
typename Eigen::internal::traits<This>::Scalar&, typename Eigen::internal::traits<This>::Scalar>::type ScalarRef;
|
||||
typedef typename Eigen::internal::traits<This>::Scalar Scalar;
|
||||
typedef typename Eigen::internal::traits<This>::Index Index;
|
||||
static const Index ColsAtCompileTime = Eigen::Dynamic;
|
||||
static const Index RowsAtCompileTime = Eigen::Dynamic;
|
||||
|
||||
typedef typename const_selector<SymmetricBlockMatrixType, SymmetricBlockMatrix, Matrix, const Matrix>::type
|
||||
DenseMatrixType;
|
||||
|
||||
typedef Eigen::Map<DenseMatrixType, 0, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> > OffDiagonal;
|
||||
typedef Eigen::SelfAdjointView<Eigen::Block<DenseMatrixType>, Eigen::Upper> SelfAdjointView;
|
||||
typedef Eigen::TriangularView<Eigen::Block<DenseMatrixType>, Eigen::Upper> TriangularView;
|
||||
|
||||
protected:
|
||||
mutable Eigen::Block<DenseMatrixType> myBlock_;
|
||||
template<typename OtherSymmetricBlockMatrixType> friend class SymmetricBlockMatrixBlockExpr;
|
||||
|
||||
public:
|
||||
/// Create a SymmetricBlockMatrixBlockExpr from the specified block of a SymmetricBlockMatrix.
|
||||
SymmetricBlockMatrixBlockExpr(SymmetricBlockMatrixType& blockMatrix, Index iBlock, Index jBlock) :
|
||||
xpr_(blockMatrix), myBlock_(blockMatrix.matrix_.block(0, 0, 0, 0))
|
||||
{
|
||||
initIndices(iBlock, jBlock);
|
||||
}
|
||||
|
||||
/// Create a SymmetricBlockMatrixBlockExpr from the specified range of blocks of a
|
||||
/// SymmetricBlockMatrix.
|
||||
SymmetricBlockMatrixBlockExpr(SymmetricBlockMatrixType& blockMatrix,
|
||||
Index firstRowBlock, Index firstColBlock, Index rowBlocks, Index colBlocks) :
|
||||
xpr_(blockMatrix), myBlock_(blockMatrix.matrix_.block(0, 0, 0, 0))
|
||||
{
|
||||
initIndices(firstRowBlock, firstColBlock, rowBlocks, colBlocks);
|
||||
}
|
||||
|
||||
/// Create a SymmetricBlockMatrixBlockExpr from the specified range of blocks of a
|
||||
/// SymmetricBlockMatrix.
|
||||
SymmetricBlockMatrixBlockExpr(SymmetricBlockMatrixType& blockMatrix, Index firstBlock, Index blocks, char dummy) :
|
||||
xpr_(blockMatrix), myBlock_(blockMatrix.matrix_.block(0, 0, 0, 0))
|
||||
{
|
||||
initIndices(firstBlock, firstBlock, blocks, blocks);
|
||||
}
|
||||
|
||||
inline Index rows() const { return blockType_ != Transposed ? denseRows_ : denseCols_; }
|
||||
inline Index cols() const { return blockType_ != Transposed ? denseCols_ : denseRows_; }
|
||||
|
||||
inline BlockType blockType() const { return blockType_; }
|
||||
|
||||
inline ScalarRef operator()(Index row, Index col) const
|
||||
{
|
||||
return coeffInternal<ScalarRef>(row, col);
|
||||
}
|
||||
|
||||
inline OffDiagonal knownOffDiagonal() const
|
||||
{
|
||||
typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> DynamicStride;
|
||||
|
||||
// We can return a Map if we are either on an off-diagonal block, or a block of size 0 or 1
|
||||
assert(blockType_ != SelfAdjoint || (denseRows_ <= 1 && denseCols_ <= 1));
|
||||
if(blockType_ == Transposed)
|
||||
{
|
||||
// Swap the inner and outer stride to produce a transposed Map
|
||||
Eigen::Block<DenseMatrixType> block = const_cast<This&>(*this).xpr_.matrix_.block(densei_, densej_, denseRows_, denseCols_);
|
||||
return Eigen::Map<DenseMatrixType, 0, DynamicStride>(block.data(), block.cols(), block.rows(),
|
||||
DynamicStride(block.innerStride(), block.outerStride()));
|
||||
}
|
||||
else
|
||||
{
|
||||
Eigen::Block<DenseMatrixType> block = const_cast<This&>(*this).xpr_.matrix_.block(densei_, densej_, denseRows_, denseCols_);
|
||||
return Eigen::Map<DenseMatrixType, 0, DynamicStride>(block.data(), block.rows(), block.cols(),
|
||||
DynamicStride(block.outerStride(), block.innerStride()));
|
||||
}
|
||||
}
|
||||
|
||||
inline SelfAdjointView selfadjointView() const
|
||||
{
|
||||
assert(blockType_ == SelfAdjoint);
|
||||
return myBlock_;
|
||||
}
|
||||
|
||||
inline TriangularView triangularView() const
|
||||
{
|
||||
assert(blockType_ == SelfAdjoint);
|
||||
return myBlock_;
|
||||
}
|
||||
|
||||
template<typename Dest> inline void evalTo(Dest& dst) const
|
||||
{
|
||||
// Just try to assign to the object using either a selfadjoint view or a block view
|
||||
if(blockType_ == SelfAdjoint)
|
||||
dst = selfadjointView();
|
||||
else if(blockType_ == Plain)
|
||||
dst = myBlock_;
|
||||
else
|
||||
dst = myBlock_.transpose();
|
||||
}
|
||||
|
||||
//template<typename MatrixType> inline void evalTo(const Eigen::SelfAdjointView<MatrixType, Eigen::Upper>& rhs) const
|
||||
//{
|
||||
// if(blockType_ == SelfAdjoint)
|
||||
// rhs.nestedExpression().triangularView<Eigen::Upper>() = triangularView();
|
||||
// else
|
||||
// throw std::invalid_argument("Cannot assign an off-diagonal block to a self-adjoint matrix");
|
||||
//}
|
||||
|
||||
//template<typename MatrixType> inline void evalTo(const Eigen::TriangularView<MatrixType, Eigen::Upper>& rhs) const
|
||||
//{
|
||||
// if(blockType_ == SelfAdjoint)
|
||||
// rhs.nestedExpression().triangularView<Eigen::Upper>() = triangularView();
|
||||
// else
|
||||
// throw std::invalid_argument("Cannot assign an off-diagonal block to a self-adjoint matrix");
|
||||
//}
|
||||
|
||||
template<typename RhsDerived>
|
||||
This& operator=(const Eigen::MatrixBase<RhsDerived>& rhs)
|
||||
{
|
||||
// Just try to assign to the object using either a selfadjoint view or a block view
|
||||
if(blockType_ == SelfAdjoint)
|
||||
triangularView() = rhs.derived().template triangularView<Eigen::Upper>();
|
||||
else if(blockType_ == Plain)
|
||||
myBlock_ = rhs.derived();
|
||||
else
|
||||
myBlock_.transpose() = rhs.derived();
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename MatrixType>
|
||||
This& operator=(const Eigen::SelfAdjointView<MatrixType, Eigen::Upper>& rhs)
|
||||
{
|
||||
if(blockType_ == SelfAdjoint)
|
||||
triangularView() = rhs.nestedExpression().template triangularView<Eigen::Upper>();
|
||||
else
|
||||
throw std::invalid_argument("Cannot assign a self-adjoint matrix to an off-diagonal block");
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename OtherSymmetricBlockMatrixType>
|
||||
This& operator=(const SymmetricBlockMatrixBlockExpr<OtherSymmetricBlockMatrixType>& other)
|
||||
{
|
||||
_doAssign(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
This& operator=(const This& other)
|
||||
{
|
||||
// This version is required so GCC doesn't synthesize a default operator=.
|
||||
_doAssign(other);
|
||||
return *this;
|
||||
}
|
||||
|
||||
template<typename OtherSymmetricBlockMatrixType>
|
||||
This& operator+=(const SymmetricBlockMatrixBlockExpr<OtherSymmetricBlockMatrixType>& other)
|
||||
{
|
||||
if(blockType_ == SelfAdjoint)
|
||||
{
|
||||
assert(other.blockType() == SelfAdjoint);
|
||||
triangularView() += other.triangularView().nestedExpression();
|
||||
}
|
||||
else if(blockType_ == Plain)
|
||||
{
|
||||
assert(other.blockType() == Plain || other.blockType() == Transposed);
|
||||
if(other.blockType() == Transposed)
|
||||
myBlock_ += other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_ += other.myBlock_;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(other.blockType() == Plain || other.blockType() == Transposed);
|
||||
if(other.blockType() == Transposed)
|
||||
myBlock_.transpose() += other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_.transpose() += other.myBlock_;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
void initIndices(Index iBlock, Index jBlock, Index blockRows = 1, Index blockCols = 1)
|
||||
{
|
||||
if(iBlock == jBlock && blockRows == blockCols)
|
||||
{
|
||||
densei_ = xpr_.offset(iBlock);
|
||||
densej_ = densei_;
|
||||
if(blockRows > 0)
|
||||
xpr_.checkBlock(iBlock + blockRows - 1);
|
||||
denseRows_ = xpr_.offsetUnchecked(iBlock + blockRows) - densei_;
|
||||
if(blockCols > 0)
|
||||
xpr_.checkBlock(jBlock + blockCols - 1);
|
||||
denseCols_ = xpr_.offsetUnchecked(jBlock + blockCols) - densej_;
|
||||
blockType_ = SelfAdjoint;
|
||||
}
|
||||
else
|
||||
{
|
||||
if(jBlock > iBlock || (iBlock == jBlock && blockCols > blockRows))
|
||||
{
|
||||
densei_ = xpr_.offset(iBlock);
|
||||
densej_ = xpr_.offset(jBlock);
|
||||
if(blockRows > 0)
|
||||
xpr_.checkBlock(iBlock + blockRows - 1);
|
||||
denseRows_ = xpr_.offsetUnchecked(iBlock + blockRows) - densei_;
|
||||
if(blockCols > 0)
|
||||
xpr_.checkBlock(jBlock + blockCols - 1);
|
||||
denseCols_ = xpr_.offsetUnchecked(jBlock + blockCols) - densej_;
|
||||
blockType_ = Plain;
|
||||
}
|
||||
else
|
||||
{
|
||||
densei_ = xpr_.offset(jBlock);
|
||||
densej_ = xpr_.offset(iBlock);
|
||||
if(blockCols > 0)
|
||||
xpr_.checkBlock(jBlock + blockCols - 1);
|
||||
denseRows_ = xpr_.offsetUnchecked(jBlock + blockCols) - densei_;
|
||||
if(blockRows > 0)
|
||||
xpr_.checkBlock(iBlock + blockRows - 1);
|
||||
denseCols_ = xpr_.offsetUnchecked(iBlock + blockRows) - densej_;
|
||||
blockType_ = Transposed;
|
||||
}
|
||||
|
||||
// Validate that the block does not cross below the diagonal (the indices have already been
|
||||
// flipped above the diagonal for ranges starting below the diagonal).
|
||||
if(densei_ + denseRows_ > densej_ + 1)
|
||||
throw std::invalid_argument("Off-diagonal block ranges may not cross the diagonal");
|
||||
}
|
||||
|
||||
new (&myBlock_) Eigen::Block<DenseMatrixType>(xpr_.matrix_.block(densei_, densej_, denseRows_, denseCols_));
|
||||
}
|
||||
|
||||
template<typename ScalarType>
|
||||
inline ScalarType coeffInternal(Index row, Index col) const
|
||||
{
|
||||
// We leave index checking up to the Block class
|
||||
if(blockType_ == Plain)
|
||||
{
|
||||
return myBlock_(row, col);
|
||||
}
|
||||
else if(blockType_ == SelfAdjoint)
|
||||
{
|
||||
if(row <= col)
|
||||
return myBlock_(row, col);
|
||||
else
|
||||
return myBlock_.transpose()(row, col);
|
||||
}
|
||||
else
|
||||
{
|
||||
return myBlock_.transpose()(row, col);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename OtherSymmetricBlockMatrixType>
|
||||
void _doAssign(const SymmetricBlockMatrixBlockExpr<OtherSymmetricBlockMatrixType>& other)
|
||||
{
|
||||
if(blockType_ == SelfAdjoint)
|
||||
{
|
||||
assert(other.blockType() == SelfAdjoint);
|
||||
triangularView() = other.triangularView().nestedExpression();
|
||||
}
|
||||
else if(blockType_ == Plain)
|
||||
{
|
||||
assert(other.blockType() == Plain || other.blockType() == Transposed);
|
||||
if(other.blockType() == Transposed)
|
||||
myBlock_ = other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_ = other.myBlock_;
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(other.blockType() == Plain || other.blockType() == Transposed);
|
||||
if(other.blockType() == Transposed)
|
||||
myBlock_.transpose() = other.myBlock_.transpose();
|
||||
else
|
||||
myBlock_.transpose() = other.myBlock_;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
}
|
|
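The expression class above decides at construction time whether the requested block is stored directly, stored as its transpose, or lies on the diagonal; the single-block rule implemented by initIndices() can be summarized by this illustrative fragment (the classify helper is not part of the commit).

// Illustrative sketch only: the single-block rule implemented by initIndices() above.
enum BlockType { Plain, SelfAdjoint, Transposed };

BlockType classify(int iBlock, int jBlock)
{
  if (iBlock == jBlock) return SelfAdjoint;  // on the diagonal: symmetric, upper triangle stored
  if (jBlock > iBlock)  return Plain;        // above the diagonal: stored as written
  return Transposed;                         // below the diagonal: served as the transpose of (jBlock, iBlock)
}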
@ -0,0 +1,188 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testSymmetricBlockMatrix.cpp
|
||||
* @brief Unit tests for SymmetricBlockMatrix class
|
||||
* @author Richard Roberts
|
||||
**/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/SymmetricBlockMatrix.h>
|
||||
#include <boost/assign/list_of.hpp>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
using boost::assign::list_of;
|
||||
|
||||
static SymmetricBlockMatrix testBlockMatrix(
|
||||
list_of(3)(2)(1),
|
||||
(Mat(6, 6) <<
|
||||
1, 2, 3, 4, 5, 6,
|
||||
2, 8, 9, 10, 11, 12,
|
||||
3, 9, 15, 16, 17, 18,
|
||||
4, 10, 16, 22, 23, 24,
|
||||
5, 11, 17, 23, 29, 30,
|
||||
6, 12, 18, 24, 30, 36));
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SymmetricBlockMatrix, ReadBlocks)
|
||||
{
|
||||
// On the diagonal
|
||||
Matrix expected1 = (Mat(2, 2) <<
|
||||
22, 23,
|
||||
23, 29);
|
||||
Matrix actual1 = testBlockMatrix(1, 1);
|
||||
// Test only writing the upper triangle for efficiency
|
||||
Matrix actual1t = Matrix::Zero(2, 2);
|
||||
actual1t.triangularView<Eigen::Upper>() = testBlockMatrix(1, 1).triangularView();
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
EXPECT(assert_equal(Matrix(expected1.triangularView<Eigen::Upper>()), actual1t));
|
||||
|
||||
// Above the diagonal
|
||||
Matrix expected2 = (Mat(3, 2) <<
|
||||
4, 5,
|
||||
10, 11,
|
||||
16, 17);
|
||||
Matrix actual2 = testBlockMatrix(0, 1);
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
|
||||
// Below the diagonal
|
||||
Matrix expected3 = (Mat(2, 3) <<
|
||||
4, 10, 16,
|
||||
5, 11, 17);
|
||||
Matrix actual3 = testBlockMatrix(1, 0);
|
||||
EXPECT(assert_equal(expected3, actual3));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SymmetricBlockMatrix, WriteBlocks)
|
||||
{
|
||||
// On the diagonal
|
||||
Matrix expected1 = testBlockMatrix(1, 1);
|
||||
SymmetricBlockMatrix bm1 = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm1(1, 1) = expected1.selfadjointView<Eigen::Upper>(); // Verified with debugger that this only writes the upper triangle
|
||||
Matrix actual1 = bm1(1, 1);
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
|
||||
// On the diagonal
|
||||
Matrix expected1p = testBlockMatrix(1, 1);
|
||||
SymmetricBlockMatrix bm1p = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm1p(1, 1) = expected1p; // Verified with debugger that this only writes the upper triangle
|
||||
Matrix actual1p = bm1p(1, 1);
|
||||
EXPECT(assert_equal(expected1p, actual1p));
|
||||
|
||||
// Above the diagonal
|
||||
Matrix expected2 = testBlockMatrix(0, 1);
|
||||
SymmetricBlockMatrix bm2 = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm2(0, 1) = expected2;
|
||||
Matrix actual2 = bm2(0, 1);
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
|
||||
// Below the diagonal
|
||||
Matrix expected3 = testBlockMatrix(1, 0);
|
||||
SymmetricBlockMatrix bm3 = SymmetricBlockMatrix::LikeActiveViewOf(testBlockMatrix);
|
||||
bm3(1, 0) = expected3;
|
||||
Matrix actual3 = bm3(1, 0);
|
||||
EXPECT(assert_equal(expected3, actual3));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SymmetricBlockMatrix, Ranges)
|
||||
{
|
||||
// On the diagonal
|
||||
Matrix expected1 = (Mat(3, 3) <<
|
||||
22, 23, 24,
|
||||
23, 29, 30,
|
||||
24, 30, 36);
|
||||
Matrix actual1 = testBlockMatrix.range(1, 3, 1, 3).selfadjointView();
|
||||
Matrix actual1a = testBlockMatrix.range(1, 3, 1, 3);
|
||||
EXPECT(assert_equal(expected1, actual1));
|
||||
EXPECT(assert_equal(expected1, actual1a));
|
||||
|
||||
// Above the diagonal
|
||||
Matrix expected2 = (Mat(3, 1) <<
|
||||
24,
|
||||
30,
|
||||
36);
|
||||
Matrix actual2 = testBlockMatrix.range(1, 3, 2, 3).knownOffDiagonal();
|
||||
Matrix actual2a = testBlockMatrix.range(1, 3, 2, 3);
|
||||
EXPECT(assert_equal(expected2, actual2));
|
||||
EXPECT(assert_equal(expected2, actual2a));
|
||||
|
||||
// Below the diagonal
|
||||
Matrix expected3 = (Mat(3, 3) <<
|
||||
4, 10, 16,
|
||||
5, 11, 17,
|
||||
6, 12, 18);
|
||||
Matrix actual3 = testBlockMatrix.range(1, 3, 0, 1).knownOffDiagonal();
|
||||
Matrix actual3a = testBlockMatrix.range(1, 3, 0, 1);
|
||||
EXPECT(assert_equal(expected3, actual3));
|
||||
EXPECT(assert_equal(expected3, actual3a));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SymmetricBlockMatrix, expressions)
|
||||
{
|
||||
SymmetricBlockMatrix expected1(list_of(2)(3)(1), (Mat(6, 6) <<
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 0, 4, 6, 8, 0,
|
||||
0, 0, 0, 9, 12, 0,
|
||||
0, 0, 0, 0, 16, 0,
|
||||
0, 0, 0, 0, 0, 0));
|
||||
|
||||
SymmetricBlockMatrix expected2(list_of(2)(3)(1), (Mat(6, 6) <<
|
||||
0, 0, 10, 15, 20, 0,
|
||||
0, 0, 12, 18, 24, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0));
|
||||
|
||||
Matrix a = (Mat(1, 3) << 2, 3, 4);
|
||||
Matrix b = (Mat(1, 2) << 5, 6);
|
||||
|
||||
SymmetricBlockMatrix bm1(list_of(2)(3)(1));
|
||||
bm1.full().triangularView().setZero();
|
||||
bm1(1, 1).selfadjointView().rankUpdate(a.transpose());
|
||||
EXPECT(assert_equal(Matrix(expected1.full().selfadjointView()), bm1.full().selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm2(list_of(2)(3)(1));
|
||||
bm2.full().triangularView().setZero();
|
||||
bm2(0, 1).knownOffDiagonal() += b.transpose() * a;
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm2.full().selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm3(list_of(2)(3)(1));
|
||||
bm3.full().triangularView().setZero();
|
||||
bm3(1, 0).knownOffDiagonal() += a.transpose() * b;
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm3.full().selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm4(list_of(2)(3)(1));
|
||||
bm4.full().triangularView().setZero();
|
||||
bm4(1, 1) += expected1(1, 1);
|
||||
EXPECT(assert_equal(Matrix(expected1.full().selfadjointView()), bm4.full().selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm5(list_of(2)(3)(1));
|
||||
bm5.full().triangularView().setZero();
|
||||
bm5(0, 1) += expected2(0, 1);
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm5.full().selfadjointView()));
|
||||
|
||||
SymmetricBlockMatrix bm6(list_of(2)(3)(1));
|
||||
bm6.full().triangularView().setZero();
|
||||
bm6(1, 0) += expected2(1, 0);
|
||||
EXPECT(assert_equal(Matrix(expected2.full().selfadjointView()), bm6.full().selfadjointView()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -129,8 +129,8 @@ HessianFactor::HessianFactor(Key j, const Vector& mu, const Matrix& Sigma) :
|
|||
if (Sigma.rows() != Sigma.cols() || Sigma.rows() != mu.size()) throw invalid_argument(
|
||||
"Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions");
|
||||
info_(0,0) = Sigma.inverse(); // G
|
||||
info_(0,1) = info_(0,0) * mu; // g
|
||||
info_(1,1)(0,0) = mu.dot(info_(0,1).col(0)); // f
|
||||
info_(0,1) = info_(0,0).selfadjointView() * mu; // g
|
||||
info_(1,1)(0,0) = mu.dot(info_(0,1).knownOffDiagonal().col(0)); // f
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -230,11 +230,11 @@ void _FromJacobianHelper(const JacobianFactor& jf, SymmetricBlockMatrix& info)
|
|||
{
|
||||
if(jf.get_model()->isConstrained())
|
||||
throw invalid_argument("Cannot construct HessianFactor from JacobianFactor with constrained noise model");
|
||||
info.full().noalias() = jf.matrixObject().full().transpose() *
|
||||
info.full().triangularView() = jf.matrixObject().full().transpose() *
|
||||
(jfModel->invsigmas().array() * jfModel->invsigmas().array()).matrix().asDiagonal() *
|
||||
jf.matrixObject().full();
|
||||
} else {
|
||||
info.full().noalias() = jf.matrixObject().full().transpose() * jf.matrixObject().full();
|
||||
info.full().triangularView() = jf.matrixObject().full().transpose() * jf.matrixObject().full();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -292,7 +292,7 @@ HessianFactor::HessianFactor(const GaussianFactorGraph& factors,
|
|||
}
|
||||
dims.back() = 1;
|
||||
info_ = SymmetricBlockMatrix(dims);
|
||||
info_.full().setZero();
|
||||
info_.full().triangularView().setZero();
|
||||
gttoc(allocate);
|
||||
|
||||
// Form A' * A
|
||||
|
@ -318,7 +318,7 @@ void HessianFactor::print(const std::string& s, const KeyFormatter& formatter) c
|
|||
for(const_iterator key=this->begin(); key!=this->end(); ++key)
|
||||
cout << formatter(*key) << "(" << this->getDim(key) << ") ";
|
||||
cout << "\n";
|
||||
gtsam::print(Matrix(info_.range(0,info_.nBlocks(), 0,info_.nBlocks()).selfadjointView<Eigen::Upper>()), "Augmented information matrix: ");
|
||||
gtsam::print(Matrix(info_.full().selfadjointView()), "Augmented information matrix: ");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -328,9 +328,9 @@ bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
|
|||
else {
|
||||
if(!Factor::equals(lf, tol))
|
||||
return false;
|
||||
Matrix thisMatrix = this->info_.full().selfadjointView<Eigen::Upper>();
|
||||
Matrix thisMatrix = this->info_.full().selfadjointView();
|
||||
thisMatrix(thisMatrix.rows()-1, thisMatrix.cols()-1) = 0.0;
|
||||
Matrix rhsMatrix = static_cast<const HessianFactor&>(lf).info_.full().selfadjointView<Eigen::Upper>();
|
||||
Matrix rhsMatrix = static_cast<const HessianFactor&>(lf).info_.full().selfadjointView();
|
||||
rhsMatrix(rhsMatrix.rows()-1, rhsMatrix.cols()-1) = 0.0;
|
||||
return equal_with_abs_tol(thisMatrix, rhsMatrix, tol);
|
||||
}
|
||||
|
@ -339,13 +339,13 @@ bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
|
|||
/* ************************************************************************* */
|
||||
Matrix HessianFactor::augmentedInformation() const
|
||||
{
|
||||
return info_.full().selfadjointView<Eigen::Upper>();
|
||||
return info_.full().selfadjointView();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix HessianFactor::information() const
|
||||
{
|
||||
return info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>();
|
||||
return info_.range(0, this->size(), 0, this->size()).selfadjointView();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -369,7 +369,7 @@ double HessianFactor::error(const VectorValues& c) const {
|
|||
// NOTE may not be as efficient
|
||||
const Vector x = c.vector(this->keys());
|
||||
xtg = x.dot(linearTerm());
|
||||
xGx = x.transpose() * info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>() * x;
|
||||
xGx = x.transpose() * info_.range(0, this->size(), 0, this->size()).selfadjointView() * x;
|
||||
return 0.5 * (f - 2.0 * xtg + xGx);
|
||||
}
|
||||
|
||||
|
@ -397,12 +397,7 @@ void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatte
|
|||
DenseIndex slot2 = (j2 == (DenseIndex)update.size()) ? nrInfoBlocks-1 : slots[j2];
|
||||
for(DenseIndex j1=0; j1<=j2; ++j1) {
|
||||
DenseIndex slot1 = (j1 == (DenseIndex)update.size()) ? nrInfoBlocks-1 : slots[j1];
|
||||
if(slot2 > slot1)
|
||||
info_(slot1, slot2).noalias() += update.info_(j1, j2);
|
||||
else if(slot1 > slot2)
|
||||
info_(slot2, slot1).noalias() += update.info_(j1, j2).transpose();
|
||||
else
|
||||
info_(slot1, slot2).triangularView<Eigen::Upper>() += update.info_(j1, j2);
|
||||
info_(slot1, slot2) += update.info_(j1, j2);
|
||||
}
|
||||
}
|
||||
gttoc(update);
|
||||
|
@ -458,48 +453,21 @@ void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatt
|
|||
for(DenseIndex j1 = 0; j1 <= j2; ++j1) { // Vertical block of Hessian
|
||||
DenseIndex slot1 = (j1 == update.size()) ? nrInfoBlocks-1 : slots[j1];
|
||||
assert(slot1 >= 0 && slot1 < nrInfoBlocks);
|
||||
updateBlocks.offset(0);
|
||||
if(slot2 > slot1)
|
||||
info_(slot1, slot2).noalias() += updateBlocks(j1).transpose() * updateBlocks(j2);
|
||||
else if(slot1 > slot2)
|
||||
info_(slot2, slot1).noalias() += updateBlocks(j2).transpose() * updateBlocks(j1);
|
||||
if(slot1 != slot2)
|
||||
info_(slot1, slot2).knownOffDiagonal() += updateBlocks(j1).transpose() * updateBlocks(j2);
|
||||
else
|
||||
info_(slot1, slot2).triangularView<Eigen::Upper>() += updateBlocks(j1).transpose() * updateBlocks(j2);
|
||||
info_(slot1, slot2).selfadjointView().rankUpdate(updateBlocks(j1).transpose());
|
||||
}
|
||||
}
|
||||
gttoc(update);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
GaussianConditional::shared_ptr HessianFactor::splitEliminatedFactor(size_t nrFrontals)
|
||||
{
|
||||
gttic(HessianFactor_splitEliminatedFactor);
|
||||
|
||||
// Create one big conditionals with many frontal variables.
|
||||
gttic(Construct_conditional);
|
||||
const size_t varDim = info_.offset(nrFrontals);
|
||||
VerticalBlockMatrix Ab = VerticalBlockMatrix::LikeActiveViewOf(info_, varDim);
|
||||
Ab.full() = info_.range(0, nrFrontals, 0, info_.nBlocks());
|
||||
GaussianConditional::shared_ptr conditional = boost::make_shared<GaussianConditional>(
|
||||
keys_, nrFrontals, Ab);
|
||||
gttoc(Construct_conditional);
|
||||
|
||||
gttic(Remaining_factor);
|
||||
// Take lower-right block of Ab_ to get the new factor
|
||||
info_.blockStart() = nrFrontals;
|
||||
// Assign the keys
|
||||
keys_.erase(begin(), begin() + nrFrontals);
|
||||
gttoc(Remaining_factor);
|
||||
|
||||
return conditional;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
GaussianFactor::shared_ptr HessianFactor::negate() const
|
||||
{
|
||||
shared_ptr result = boost::make_shared<This>(*this);
|
||||
result->info_.full() = -result->info_.full(); // Negate the information matrix of the result
|
||||
result->info_.full().triangularView() = -result->info_.full().triangularView().nestedExpression(); // Negate the information matrix of the result
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -521,12 +489,12 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
|
|||
Vector xj = x.at(keys_[j]);
|
||||
DenseIndex i = 0;
|
||||
for (; i < j; ++i)
|
||||
y[i] += info_(i, j) * xj;
|
||||
y[i] += info_(i, j).knownOffDiagonal() * xj;
|
||||
// blocks on the diagonal are only half
|
||||
y[i] += info_(j, j).selfadjointView<Eigen::Upper>() * xj;
|
||||
y[i] += info_(j, j).selfadjointView() * xj;
|
||||
// for below diagonal, we take transpose block from upper triangular part
|
||||
for (i = j + 1; i < (DenseIndex)size(); ++i)
|
||||
y[i] += info_(j, i).transpose() * xj;
|
||||
y[i] += info_(i, j).knownOffDiagonal() * xj;
|
||||
}
|
||||
|
||||
// copy to yvalues
|
||||
|
@ -546,7 +514,7 @@ VectorValues HessianFactor::gradientAtZero() const {
|
|||
VectorValues g;
|
||||
size_t n = size();
|
||||
for (size_t j = 0; j < n; ++j)
|
||||
g.insert(keys_[j],-info_(j,n));
|
||||
g.insert(keys_[j], -info_(j,n).knownOffDiagonal());
|
||||
return g;
|
||||
}
|
||||
|
||||
|
@ -567,12 +535,17 @@ EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys)
|
|||
}
|
||||
|
||||
// Do dense elimination
|
||||
if(!choleskyPartial(jointFactor->info_.matrix(), jointFactor->info_.offset(keys.size())))
|
||||
GaussianConditional::shared_ptr conditional;
|
||||
try {
|
||||
VerticalBlockMatrix Ab = jointFactor->info_.choleskyPartial(keys.size());
|
||||
conditional = boost::make_shared<GaussianConditional>(jointFactor->keys(), keys.size(), Ab);
|
||||
// Erase the eliminated keys in the remaining factor
|
||||
jointFactor->keys_.erase(jointFactor->begin(), jointFactor->begin() + keys.size());
|
||||
} catch(CholeskyFailed&) {
|
||||
throw IndeterminantLinearSystemException(keys.front());
|
||||
}
|
||||
|
||||
// Split conditional
|
||||
GaussianConditional::shared_ptr conditional = jointFactor->splitEliminatedFactor(keys.size());
|
||||
|
||||
// Return result
|
||||
return make_pair(conditional, jointFactor);
|
||||
}
|
||||
|
||||
|
|
|
@ -265,13 +265,13 @@ namespace gtsam {
|
|||
* as described above. See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
constBlock info() const { return info_.full(); }
|
||||
SymmetricBlockMatrix::constBlock info() const { return info_.full(); }
|
||||
|
||||
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
|
||||
* as described above. See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
Block info() { return info_.full(); }
|
||||
SymmetricBlockMatrix::Block info() { return info_.full(); }
|
||||
|
||||
/** Return the constant term \f$ f \f$ as described above
|
||||
* @return The constant term \f$ f \f$
|
||||
|
@ -287,21 +287,25 @@ namespace gtsam {
|
|||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constBlock::ColXpr linearTerm(const_iterator j) const { return info_(j-begin(), size()).col(0); }
|
||||
constBlock::OffDiagonal::ColXpr linearTerm(const_iterator j) const {
|
||||
return info_(j-begin(), size()).knownOffDiagonal().col(0); }
|
||||
|
||||
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
|
||||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
Block::ColXpr linearTerm(iterator j) { return info_(j-begin(), size()).col(0); }
|
||||
Block::OffDiagonal::ColXpr linearTerm(iterator j) {
|
||||
return info_(j-begin(), size()).knownOffDiagonal().col(0); }
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constBlock::ColXpr linearTerm() const { return info_.range(0, this->size(), this->size(), this->size() + 1).col(0); }
|
||||
constBlock::OffDiagonal::ColXpr linearTerm() const {
|
||||
return info_.range(0, this->size(), this->size(), this->size() + 1).knownOffDiagonal().col(0); }
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
Block::ColXpr linearTerm() { return info_.range(0, this->size(), this->size(), this->size() + 1).col(0); }
|
||||
Block::OffDiagonal::ColXpr linearTerm() {
|
||||
return info_.range(0, this->size(), this->size(), this->size() + 1).knownOffDiagonal().col(0); }
|
||||
|
||||
/** Return the augmented information matrix represented by this GaussianFactor.
|
||||
* The augmented information matrix contains the information matrix with an
|
||||
|
@ -401,9 +405,6 @@ namespace gtsam {
|
|||
|
||||
private:
|
||||
|
||||
/** split partially eliminated factor */
|
||||
boost::shared_ptr<GaussianConditional> splitEliminatedFactor(size_t nrFrontals);
|
||||
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
|
|
|
@ -86,7 +86,7 @@ TEST(HessianFactor, ConversionConstructor)
|
|||
/* ************************************************************************* */
|
||||
TEST(HessianFactor, Constructor1)
|
||||
{
|
||||
Matrix G = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
|
||||
Matrix G = (Mat(2,2) << 3.0, 5.0, 5.0, 6.0);
|
||||
Vector g = (Vec(2) << -8.0, -9.0);
|
||||
double f = 10.0;
|
||||
HessianFactor factor(0, G, g, f);
|
||||
|
@ -132,7 +132,7 @@ TEST(HessianFactor, Constructor2)
|
|||
{
|
||||
Matrix G11 = (Mat(1,1) << 1.0);
|
||||
Matrix G12 = (Mat(1,2) << 2.0, 4.0);
|
||||
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
|
||||
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 5.0, 6.0);
|
||||
Vector g1 = (Vec(1) << -7.0);
|
||||
Vector g2 = (Vec(2) << -8.0, -9.0);
|
||||
double f = 10.0;
|
||||
|
@ -175,10 +175,10 @@ TEST(HessianFactor, Constructor3)
|
|||
Matrix G12 = (Mat(1,2) << 2.0, 4.0);
|
||||
Matrix G13 = (Mat(1,3) << 3.0, 6.0, 9.0);
|
||||
|
||||
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
|
||||
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 5.0, 6.0);
|
||||
Matrix G23 = (Mat(2,3) << 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
|
||||
|
||||
Matrix G33 = (Mat(3,3) << 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
|
||||
Matrix G33 = (Mat(3,3) << 1.0, 2.0, 3.0, 2.0, 5.0, 6.0, 3.0, 6.0, 9.0);
|
||||
|
||||
Vector g1 = (Vec(1) << -7.0);
|
||||
Vector g2 = (Vec(2) << -8.0, -9.0);
|
||||
|
@ -222,10 +222,10 @@ TEST(HessianFactor, ConstructorNWay)
|
|||
Matrix G12 = (Mat(1,2) << 2.0, 4.0);
|
||||
Matrix G13 = (Mat(1,3) << 3.0, 6.0, 9.0);
|
||||
|
||||
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 0.0, 6.0);
|
||||
Matrix G22 = (Mat(2,2) << 3.0, 5.0, 5.0, 6.0);
|
||||
Matrix G23 = (Mat(2,3) << 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
|
||||
|
||||
Matrix G33 = (Mat(3,3) << 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
|
||||
Matrix G33 = (Mat(3,3) << 1.0, 2.0, 3.0, 2.0, 5.0, 6.0, 3.0, 6.0, 9.0);
|
||||
|
||||
Vector g1 = (Vec(1) << -7.0);
|
||||
Vector g2 = (Vec(2) << -8.0, -9.0);
|
||||
|
@ -423,8 +423,7 @@ TEST(HessianFactor, combine) {
|
|||
-100.0000, 0.0, 20.0000, 0.0, 80.0000, 0.0, -20.0000,
|
||||
0.0, -100.0000, 0.0, 20.0000, 0.0, 80.0000, 14.0000,
|
||||
25.0000, -17.5000, -5.0000, 3.5000, -20.0000, 14.0000, 7.4500);
|
||||
EXPECT(assert_equal(Matrix(expected.triangularView<Eigen::Upper>()),
|
||||
Matrix(actual.matrixObject().full().triangularView<Eigen::Upper>()), tol));
|
||||
EXPECT(assert_equal(expected, Matrix(actual.matrixObject().full()), tol));
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -123,10 +123,9 @@ GaussianFactor::shared_ptr LinearContainerFactor::linearize(const Values& c) con
|
|||
jacFactor->getb() = -jacFactor->unweighted_error(delta);
|
||||
} else {
|
||||
HessianFactor::shared_ptr hesFactor = boost::dynamic_pointer_cast<HessianFactor>(linFactor);
|
||||
size_t dim = hesFactor->linearTerm().size();
|
||||
Eigen::Block<HessianFactor::Block> Gview = hesFactor->info().block(0, 0, dim, dim);
|
||||
SymmetricBlockMatrix::constBlock Gview = hesFactor->matrixObject().range(0, hesFactor->size(), 0, hesFactor->size());
|
||||
Vector deltaVector = delta.vector(keys());
|
||||
Vector G_delta = Gview.selfadjointView<Eigen::Upper>() * deltaVector;
|
||||
Vector G_delta = Gview.selfadjointView() * deltaVector;
|
||||
hesFactor->constantTerm() += deltaVector.dot(G_delta) - 2.0 * deltaVector.dot(hesFactor->linearTerm());
|
||||
hesFactor->linearTerm() -= G_delta;
|
||||
}
|
||||
|
|
|
@ -76,7 +76,9 @@ Matrix Marginals::marginalInformation(Key variable) const {
|
|||
/* ************************************************************************* */
|
||||
JointMarginal Marginals::jointMarginalCovariance(const std::vector<Key>& variables) const {
|
||||
JointMarginal info = jointMarginalInformation(variables);
|
||||
info.blockMatrix_.full() = info.blockMatrix_.full().inverse();
|
||||
info.blockMatrix_.full().triangularView() =
|
||||
info.blockMatrix_.full().selfadjointView().llt().solve(
|
||||
Matrix::Identity(info.blockMatrix_.full().rows(), info.blockMatrix_.full().rows())).triangularView<Eigen::Upper>();
|
||||
return info;
|
||||
}
|
||||
|
||||
|
|
|
@ -116,7 +116,7 @@ public:
|
|||
* in scope while this view is needed. Otherwise assign this block object to a Matrix
|
||||
* to store it.
|
||||
*/
|
||||
const Matrix& fullMatrix() const { return blockMatrix_.matrix(); }
|
||||
Eigen::SelfAdjointView<const Matrix, Eigen::Upper> fullMatrix() const { return blockMatrix_.matrix(); }
|
||||
|
||||
/** Print */
|
||||
void print(const std::string& s = "", const KeyFormatter& formatter = DefaultKeyFormatter) const;
|
||||
|
|
|
@ -69,7 +69,7 @@ TEST( AntiFactor, NegativeHessian)
|
|||
size_t variable_count = originalFactor->size();
|
||||
for(size_t i = 0; i < variable_count; ++i){
|
||||
for(size_t j = i; j < variable_count; ++j){
|
||||
Matrix expected_G = -originalHessian->info(originalHessian->begin()+i, originalHessian->begin()+j);
|
||||
Matrix expected_G = -Matrix(originalHessian->info(originalHessian->begin()+i, originalHessian->begin()+j));
|
||||
Matrix actual_G = antiHessian->info(antiHessian->begin()+i, antiHessian->begin()+j);
|
||||
CHECK(assert_equal(expected_G, actual_G, 1e-5));
|
||||
}
|
||||
|
|
|
@ -153,7 +153,7 @@ LinearizedHessianFactor::LinearizedHessianFactor(
|
|||
// Update the BlockInfo accessor
|
||||
info_ = SymmetricBlockMatrix(dims, dims+hessian->size()+1);
|
||||
// Copy the augmented matrix holding G, g, and f
|
||||
info_.matrix() = hessian->info();
|
||||
info_.full() = hessian->info();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@ -166,7 +166,7 @@ void LinearizedHessianFactor::print(const std::string& s, const KeyFormatter& ke
|
|||
std::cout << keyFormatter(key) << " ";
|
||||
std::cout << std::endl;
|
||||
|
||||
gtsam::print(Matrix(info_.range(0,info_.nBlocks(), 0,info_.nBlocks()).selfadjointView<Eigen::Upper>()), "Ab^T * Ab: ");
|
||||
gtsam::print(Matrix(info_.full()), "Ab^T * Ab: ");
|
||||
|
||||
lin_points_.print("Linearization Point: ");
|
||||
}
|
||||
|
@ -177,9 +177,9 @@ bool LinearizedHessianFactor::equals(const NonlinearFactor& expected, double tol
|
|||
const This *e = dynamic_cast<const This*> (&expected);
|
||||
if (e) {
|
||||
|
||||
Matrix thisMatrix = this->info_.full().selfadjointView<Eigen::Upper>();
|
||||
Matrix thisMatrix = this->info_.full();
|
||||
thisMatrix(thisMatrix.rows()-1, thisMatrix.cols()-1) = 0.0;
|
||||
Matrix rhsMatrix = e->info_.full().selfadjointView<Eigen::Upper>();
|
||||
Matrix rhsMatrix = e->info_.full();
|
||||
rhsMatrix(rhsMatrix.rows()-1, rhsMatrix.cols()-1) = 0.0;
|
||||
|
||||
return Base::equals(expected, tol)
|
||||
|
@ -207,7 +207,7 @@ double LinearizedHessianFactor::error(const Values& c) const {
|
|||
// error 0.5*(f - 2*x'*g + x'*G*x)
|
||||
double f = constantTerm();
|
||||
double xtg = dx.dot(linearTerm());
|
||||
double xGx = dx.transpose() * squaredTerm().selfadjointView<Eigen::Upper>() * dx;
|
||||
double xGx = dx.transpose() * squaredTerm() * dx;
|
||||
|
||||
return 0.5 * (f - 2.0 * xtg + xGx);
|
||||
}
|
||||
|
@ -229,11 +229,11 @@ LinearizedHessianFactor::linearize(const Values& c) const {
|
|||
|
||||
// f2 = f1 - 2*dx'*g1 + dx'*G1*dx
|
||||
//newInfo(this->size(), this->size())(0,0) += -2*dx.dot(linearTerm()) + dx.transpose() * squaredTerm().selfadjointView<Eigen::Upper>() * dx;
|
||||
double f = constantTerm() - 2*dx.dot(linearTerm()) + dx.transpose() * squaredTerm().selfadjointView<Eigen::Upper>() * dx;
|
||||
double f = constantTerm() - 2*dx.dot(linearTerm()) + dx.transpose() * squaredTerm() * dx;
|
||||
|
||||
// g2 = g1 - G1*dx
|
||||
//newInfo.rangeColumn(0, this->size(), this->size(), 0) -= squaredTerm().selfadjointView<Eigen::Upper>() * dx;
|
||||
Vector g = linearTerm() - squaredTerm().selfadjointView<Eigen::Upper>() * dx;
|
||||
Vector g = linearTerm() - squaredTerm() * dx;
|
||||
std::vector<Vector> gs;
|
||||
for(DenseIndex i = 0; i < info_.nBlocks()-1; ++i) {
|
||||
gs.push_back(g.segment(info_.offset(i), info_.offset(i+1) - info_.offset(i)));
|
||||
|
|
|
@ -176,8 +176,8 @@ public:
|
|||
/** hessian block data types */
|
||||
typedef SymmetricBlockMatrix::Block Block; ///< A block from the Hessian matrix
|
||||
typedef SymmetricBlockMatrix::constBlock constBlock; ///< A block from the Hessian matrix (const version)
|
||||
typedef SymmetricBlockMatrix::Block::ColXpr Column; ///< A column containing the linear term h
|
||||
typedef SymmetricBlockMatrix::constBlock::ColXpr constColumn; ///< A column containing the linear term h (const version)
|
||||
typedef SymmetricBlockMatrix::Block::OffDiagonal::ColXpr Column; ///< A column containing the linear term h
|
||||
typedef SymmetricBlockMatrix::constBlock::OffDiagonal::ColXpr constColumn; ///< A column containing the linear term h (const version)
|
||||
|
||||
protected:
|
||||
|
||||
|
@ -220,11 +220,11 @@ public:
|
|||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constColumn linearTerm(const_iterator j) const { return info_(j - this->begin(), this->size()).col(0); }
|
||||
constColumn linearTerm(const_iterator j) const { return info_(j - this->begin(), this->size()).knownOffDiagonal().col(0); }
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constColumn linearTerm() const { return info_.range(0, this->size(), this->size(), this->size() + 1).col(0); };
|
||||
constColumn linearTerm() const { return info_.range(0, this->size(), this->size(), this->size() + 1).knownOffDiagonal().col(0); };
|
||||
|
||||
/** Return a view of the block at (j1,j2) of the <emph>upper-triangular part</emph> of the
|
||||
* squared term \f$ H \f$, no data is copied. See HessianFactor class documentation
|
||||
|
@ -242,7 +242,7 @@ public:
|
|||
* See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
constBlock squaredTerm() const { return info_.range(0, this->size(), 0, this->size()); }
|
||||
constBlock::SelfAdjointView squaredTerm() const { return info_.range(0, this->size(), 0, this->size()).selfadjointView(); }
|
||||
|
||||
|
||||
/** get the dimension of the factor (number of rows on linearization) */
|
||||
|
|