Started on HessianFactor; converted the Nonlinear classes and disabled a number of the more advanced classes so that the library compiles with the nonlinear module using the unordered code.
parent c868056c22 · commit 0d05bf5ac5
@@ -103,6 +103,7 @@ endif()
if(MSVC AND NOT Boost_USE_STATIC_LIBS)
  add_definitions(-DBOOST_ALL_DYN_LINK)
endif()
add_definitions(-DBOOST_ALL_NO_LIB)

find_package(Boost 1.43 COMPONENTS serialization system filesystem thread program_options date_time regex timer chrono)

@@ -114,7 +115,9 @@ endif()

option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
set(GTSAM_BOOST_LIBRARIES ${Boost_SERIALIZATION_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_REGEX_LIBRARY})
set(GTSAM_BOOST_LIBRARIES
  ${Boost_SERIALIZATION_LIBRARY} ${Boost_SYSTEM_LIBRARY} ${Boost_FILESYSTEM_LIBRARY}
  ${Boost_THREAD_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_REGEX_LIBRARY})
if (GTSAM_DISABLE_NEW_TIMERS)
  message("WARNING: GTSAM timing instrumentation manually disabled")
  add_definitions(-DGTSAM_DISABLE_NEW_TIMERS)
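A hedged sketch (not part of the diff) of what this option means for instrumented code: the gttic/gttoc timing macros that appear elsewhere in this commit are assumed to keep compiling whether or not GTSAM_DISABLE_NEW_TIMERS is defined; only the timing backend changes.

// Assumption: gttic/gttoc are the GTSAM timing macros from gtsam/base/timing.h.
#include <gtsam/base/timing.h>

void timedSection() {
  gttic(example_section);  // start a named timer scope
  // ... work being timed ...
  gttoc(example_section);  // stop the timer
}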
@@ -0,0 +1,48 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file SymmetricBlockMatrix.cpp
 * @brief Access to matrices via blocks of pre-defined sizes. Used in GaussianFactor and GaussianConditional.
 * @author Richard Roberts
 * @date Sep 18, 2010
 */

#include <gtsam/base/SymmetricBlockMatrix.h>
#include <gtsam/base/VerticalBlockMatrix.h>

namespace gtsam {

/* ************************************************************************* */
SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(const SymmetricBlockMatrix& other)
{
  SymmetricBlockMatrix result;
  result.variableColOffsets_.resize(other.nBlocks() + 1);
  std::copy(other.variableColOffsets_.begin() + other.blockStart_, other.variableColOffsets_.end(),
    result.variableColOffsets_.begin());
  result.matrix_.resize(other.cols(), other.cols());
  result.assertInvariants();
  return result;
}

/* ************************************************************************* */
SymmetricBlockMatrix SymmetricBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& other)
{
  SymmetricBlockMatrix result;
  result.variableColOffsets_.resize(other.nBlocks() + 1);
  std::copy(other.variableColOffsets_.begin() + other.blockStart_, other.variableColOffsets_.end(),
    result.variableColOffsets_.begin());
  result.matrix_.resize(other.cols(), other.cols());
  result.assertInvariants();
  return result;
}

}
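A short sketch (not part of the diff) of how LikeActiveViewOf is meant to be used: it copies only the block structure of the argument's active view, and the entries of the newly allocated matrix are left uninitialized.

#include <gtsam/base/SymmetricBlockMatrix.h>

gtsam::SymmetricBlockMatrix copyStructure(const gtsam::SymmetricBlockMatrix& H) {
  gtsam::SymmetricBlockMatrix result = gtsam::SymmetricBlockMatrix::LikeActiveViewOf(H);
  result.matrix().setZero();  // caller decides how to fill the data
  return result;
}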
@@ -0,0 +1,229 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file SymmetricBlockMatrix.h
 * @brief Access to matrices via blocks of pre-defined sizes. Used in GaussianFactor and GaussianConditional.
 * @author Richard Roberts
 * @date Sep 18, 2010
 */
#pragma once

#include <gtsam/base/Matrix.h>

namespace gtsam {

// Forward declarations
class VerticalBlockMatrix;

/**
 * This class stores a dense matrix and allows it to be accessed as a collection of blocks. When
 * constructed, the caller must provide the dimensions of the blocks.
 *
 * The block structure is symmetric, but the underlying matrix does not necessarily need to be.
 *
 * This class also has a parameter that can be changed after construction to change the apparent
 * matrix view. firstBlock() determines the block that appears to have index 0 for all operations
 * (except re-setting firstBlock()).
 *
 * @addtogroup base */
class SymmetricBlockMatrix
{
public:
  typedef SymmetricBlockMatrix This;
  typedef Eigen::Block<Matrix> Block;
  typedef Eigen::Block<const Matrix> constBlock;

protected:
  Matrix matrix_; ///< The full matrix
  std::vector<DenseIndex> variableColOffsets_; ///< the starting columns of each block (0-based)

  DenseIndex blockStart_; ///< Changes apparent matrix view, see main class comment.

public:
  /** Construct from an empty matrix (asserts that the matrix is empty) */
  SymmetricBlockMatrix() :
    blockStart_(0)
  {
    variableColOffsets_.push_back(0);
    assertInvariants();
  }

  /** Construct from a container of the sizes of each block. */
  template<typename CONTAINER>
  SymmetricBlockMatrix(const CONTAINER dimensions) :
    blockStart_(0)
  {
    fillOffsets(dimensions.begin(), dimensions.end());
    matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back());
    assertInvariants();
  }

  /**
   * Construct from iterator over the sizes of each vertical block. */
  template<typename ITERATOR>
  SymmetricBlockMatrix(ITERATOR firstBlockDim, ITERATOR lastBlockDim) :
    blockStart_(0)
  {
    fillOffsets(firstBlockDim, lastBlockDim);
    matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back());
    assertInvariants();
  }

  /** Construct from a container of the sizes of each vertical block and a pre-prepared matrix. */
  template<typename CONTAINER>
  SymmetricBlockMatrix(const CONTAINER dimensions, const Matrix& matrix) :
    matrix_(matrix), blockStart_(0)
  {
    fillOffsets(dimensions.begin(), dimensions.end());
    if(matrix_.rows() != matrix_.cols())
      throw std::invalid_argument("Requested to create a SymmetricBlockMatrix from a non-square matrix.");
    if(variableColOffsets_.back() != matrix_.cols())
      throw std::invalid_argument("Requested to create a SymmetricBlockMatrix with dimensions that do not sum to the total size of the provided matrix.");
    assertInvariants();
  }

  /** Copy the block structure, but do not copy the matrix data. If blockStart() has been
   * modified, this copies the structure of the corresponding matrix view. In the destination
   * SymmetricBlockMatrix, blockStart() will be 0. */
  static SymmetricBlockMatrix LikeActiveViewOf(const SymmetricBlockMatrix& other);

  /** Copy the block structure, but do not copy the matrix data. If blockStart() has been
   * modified, this copies the structure of the corresponding matrix view. In the destination
   * SymmetricBlockMatrix, blockStart() will be 0. */
  static SymmetricBlockMatrix LikeActiveViewOf(const VerticalBlockMatrix& other);

  /** Row size */
  DenseIndex rows() const { assertInvariants(); return variableColOffsets_.back() - variableColOffsets_[blockStart_]; }

  /** Column size */
  DenseIndex cols() const { return rows(); }

  /** Block count */
  DenseIndex nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; }

  Block operator()(DenseIndex i_block, DenseIndex j_block) {
    return range(i_block, i_block+1, j_block, j_block+1);
  }

  constBlock operator()(DenseIndex i_block, DenseIndex j_block) const {
    return range(i_block, i_block+1, j_block, j_block+1);
  }

  Block range(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) {
    assertInvariants();
    DenseIndex i_actualStartBlock = i_startBlock + blockStart_;
    DenseIndex i_actualEndBlock = i_endBlock + blockStart_;
    DenseIndex j_actualStartBlock = j_startBlock + blockStart_;
    DenseIndex j_actualEndBlock = j_endBlock + blockStart_;
    checkBlock(i_actualStartBlock);
    checkBlock(j_actualStartBlock);
    if(i_startBlock != 0 || i_endBlock != 0) {
      checkBlock(i_actualStartBlock);
      assert(i_actualEndBlock < (DenseIndex)variableColOffsets_.size());
    }
    if(j_startBlock != 0 || j_endBlock != 0) {
      checkBlock(j_actualStartBlock);
      assert(j_actualEndBlock < (DenseIndex)variableColOffsets_.size());
    }
    return matrix_.block(
      variableColOffsets_[i_actualStartBlock], variableColOffsets_[j_actualStartBlock],
      variableColOffsets_[i_actualEndBlock] - variableColOffsets_[i_actualStartBlock],
      variableColOffsets_[j_actualEndBlock] - variableColOffsets_[j_actualStartBlock]);
  }

  constBlock range(DenseIndex i_startBlock, DenseIndex i_endBlock, DenseIndex j_startBlock, DenseIndex j_endBlock) const {
    assertInvariants();
    DenseIndex i_actualStartBlock = i_startBlock + blockStart_;
    DenseIndex i_actualEndBlock = i_endBlock + blockStart_;
    DenseIndex j_actualStartBlock = j_startBlock + blockStart_;
    DenseIndex j_actualEndBlock = j_endBlock + blockStart_;
    checkBlock(i_actualStartBlock);
    checkBlock(j_actualStartBlock);
    if(i_startBlock != 0 || i_endBlock != 0) {
      checkBlock(i_actualStartBlock);
      assert(i_actualEndBlock < (DenseIndex)variableColOffsets_.size());
    }
    if(j_startBlock != 0 || j_endBlock != 0) {
      checkBlock(j_actualStartBlock);
      assert(j_actualEndBlock < (DenseIndex)variableColOffsets_.size());
    }
    return matrix_.block(
      variableColOffsets_[i_actualStartBlock], variableColOffsets_[j_actualStartBlock],
      variableColOffsets_[i_actualEndBlock] - variableColOffsets_[i_actualStartBlock],
      variableColOffsets_[j_actualEndBlock] - variableColOffsets_[j_actualStartBlock]);
  }

  /** Return the full matrix, *not* including any portions excluded by firstBlock(). */
  Block full() {
    return range(0,nBlocks(), 0,nBlocks());
  }

  /** Return the full matrix, *not* including any portions excluded by firstBlock(). */
  constBlock full() const {
    return range(0,nBlocks(), 0,nBlocks());
  }

  /** Access to full matrix, including any portions excluded by firstBlock() to other operations. */
  const Matrix& matrix() const { return matrix_; }

  /** Access to full matrix, including any portions excluded by firstBlock() to other operations. */
  Matrix& matrix() { return matrix_; }

  DenseIndex offset(DenseIndex block) const {
    assertInvariants();
    DenseIndex actualBlock = block + blockStart_;
    checkBlock(actualBlock);
    return variableColOffsets_[actualBlock];
  }

  DenseIndex& blockStart() { return blockStart_; }
  DenseIndex blockStart() const { return blockStart_; }

protected:
  void assertInvariants() const {
    assert(matrix_.rows() == matrix_.cols());
    assert(matrix_.cols() == variableColOffsets_.back());
    assert(blockStart_ < (DenseIndex)variableColOffsets_.size());
  }

  void checkBlock(DenseIndex block) const {
    assert(matrix_.rows() == matrix_.cols());
    assert(matrix_.cols() == variableColOffsets_.back());
    assert(block < (DenseIndex)variableColOffsets_.size()-1);
    assert(variableColOffsets_[block] < matrix_.cols() && variableColOffsets_[block+1] <= matrix_.cols());
  }

  template<typename ITERATOR>
  void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim) {
    variableColOffsets_.resize((lastBlockDim-firstBlockDim)+1);
    variableColOffsets_[0] = 0;
    DenseIndex j=0;
    for(ITERATOR dim=firstBlockDim; dim!=lastBlockDim; ++dim) {
      variableColOffsets_[j+1] = variableColOffsets_[j] + *dim;
      ++ j;
    }
  }

private:
  /** Serialization function */
  friend class boost::serialization::access;
  template<class ARCHIVE>
  void serialize(ARCHIVE & ar, const unsigned int version) {
    ar & BOOST_SERIALIZATION_NVP(matrix_);
    ar & BOOST_SERIALIZATION_NVP(variableColOffsets_);
    ar & BOOST_SERIALIZATION_NVP(blockStart_);
  }
};

}
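A minimal usage sketch for the interface above (not part of the diff). Block sizes 3 and 2 give a 5x5 matrix with a 2x2 grid of blocks; blockStart() shifts which block appears to have index 0.

#include <gtsam/base/SymmetricBlockMatrix.h>
#include <vector>

void symmetricBlockMatrixExample() {
  using namespace gtsam;
  std::vector<DenseIndex> dims;
  dims.push_back(3);
  dims.push_back(2);
  SymmetricBlockMatrix H(dims);            // allocates a 5x5 matrix
  H.matrix().setZero();
  H(0, 0) = Matrix::Identity(3, 3);        // 3x3 diagonal block
  H(0, 1) = Matrix::Ones(3, 2);            // 3x2 off-diagonal block
  H(1, 1) = 2.0 * Matrix::Identity(2, 2);  // 2x2 diagonal block
  H.blockStart() = 1;                      // apparent view now starts at the second block
  Matrix lowerRight = H(0, 0);             // refers to the former block (1,1)
}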
@@ -21,11 +21,12 @@
namespace gtsam {

/* ************************************************************************* */
VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& rhs) {
VerticalBlockMatrix VerticalBlockMatrix::LikeActiveViewOf(const VerticalBlockMatrix& rhs)
{
  VerticalBlockMatrix result;
  result.variableColOffsets_.resize(rhs.nBlocks() + 1);
  for(Index i = 0; i < rhs.nBlocks() + 1; ++i)
    result.variableColOffsets_[i] = rhs.variableColOffsets_[i + rhs.blockStart_];
  std::copy(rhs.variableColOffsets_.begin() + rhs.blockStart_, rhs.variableColOffsets_.end(),
    result.variableColOffsets_.begin());
  result.matrix_.resize(rhs.rows(), result.variableColOffsets_.back());
  result.rowEnd_ = rhs.rows();
  result.assertInvariants();
@@ -12,7 +12,7 @@
/**
 * @file VerticalBlockMatrix.h
 * @brief A matrix with column blocks of pre-defined sizes. Used in JacobianFactor and
 * GaussianConditional.
 * GaussianConditional.
 * @author Richard Roberts
 * @date Sep 18, 2010 */
#pragma once

@@ -22,40 +22,35 @@
namespace gtsam {

// Forward declarations
//class S
class SymmetricBlockMatrix;

/**
 * This class stores a dense matrix and allows it to be accessed as a collection of vertical blocks.
 * It also provides for accessing individual columns from those blocks. When constructed or
 * resized, the caller must provide the dimensions of the blocks.
 * This class stores a dense matrix and allows it to be accessed as a collection of vertical
 * blocks. The dimensions of the blocks are provided when constructing this class.
 *
 * This class also has three parameters that can be changed after construction that change the
 * apparent view of the matrix without any reallocation or data copying. firstBlock() determines
 * the block that has index 0 for all operations (except for re-setting firstBlock()). rowStart()
 * determines the apparent first row of the matrix for all operations (except for setting rowStart()
 * and rowEnd()). rowEnd() determines the apparent exclusive (one-past-the-last) last row for all
 * operations. To include all rows, rowEnd() should be set to the number of rows in the matrix
 * (i.e. one after the last true row index).
 * determines the apparent first row of the matrix for all operations (except for setting
 * rowStart() and rowEnd()). rowEnd() determines the apparent exclusive (one-past-the-last) last
 * row for all operations. To include all rows, rowEnd() should be set to the number of rows in
 * the matrix (i.e. one after the last true row index).
 *
 * @addtogroup base */
class GTSAM_EXPORT VerticalBlockMatrix {
class GTSAM_EXPORT VerticalBlockMatrix
{
public:
  typedef VerticalBlockMatrix This;
  typedef Matrix::Index Index;
  typedef Eigen::Block<Matrix> Block;
  typedef Eigen::Block<const Matrix> constBlock;

  // columns of blocks
  typedef Eigen::Block<Matrix>::ColXpr Column;
  typedef Eigen::Block<const Matrix>::ConstColXpr constColumn;

protected:
  Matrix matrix_; ///< The full matrix
  std::vector<Index> variableColOffsets_; ///< the starting columns of each block (0-based)
  std::vector<DenseIndex> variableColOffsets_; ///< the starting columns of each block (0-based)

  Index rowStart_; ///< Changes apparent matrix view, see main class comment.
  Index rowEnd_; ///< Changes apparent matrix view, see main class comment.
  Index blockStart_; ///< Changes apparent matrix view, see main class comment.
  DenseIndex rowStart_; ///< Changes apparent matrix view, see main class comment.
  DenseIndex rowEnd_; ///< Changes apparent matrix view, see main class comment.
  DenseIndex blockStart_; ///< Changes apparent matrix view, see main class comment.

public:

@@ -64,10 +59,10 @@ namespace gtsam {
    rowStart_(0), rowEnd_(0), blockStart_(0)
  {
    variableColOffsets_.push_back(0);
    assertInvariants();
  }

  /** Construct from a container of the sizes of each vertical block, resize the matrix so that
   * its height is matrixNewHeight and its width fits the given block dimensions. */
  /** Construct from a container of the sizes of each vertical block. */
  template<typename CONTAINER>
  VerticalBlockMatrix(const CONTAINER dimensions, DenseIndex height) :
    rowStart_(0), rowEnd_(height), blockStart_(0)

@@ -89,8 +84,7 @@ namespace gtsam {
  }

  /**
   * Construct from iterator over the sizes of each vertical block, resize the matrix so that its
   * height is matrixNewHeight and its width fits the given block dimensions. */
   * Construct from iterator over the sizes of each vertical block. */
  template<typename ITERATOR>
  VerticalBlockMatrix(ITERATOR firstBlockDim, ITERATOR lastBlockDim, DenseIndex height) :
    rowStart_(0), rowEnd_(height), blockStart_(0)

@@ -101,44 +95,44 @@ namespace gtsam {
  }

  /// Row size
  Index rows() const { assertInvariants(); return rowEnd_ - rowStart_; }
  DenseIndex rows() const { assertInvariants(); return rowEnd_ - rowStart_; }

  /// Column size
  Index cols() const { assertInvariants(); return variableColOffsets_.back() - variableColOffsets_[blockStart_]; }
  DenseIndex cols() const { assertInvariants(); return variableColOffsets_.back() - variableColOffsets_[blockStart_]; }

  /// Block count
  Index nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; }
  DenseIndex nBlocks() const { assertInvariants(); return variableColOffsets_.size() - 1 - blockStart_; }

  /** Access a single block in the underlying matrix with read/write access */
  Block operator()(Index block) { return range(block, block+1); }
  Block operator()(DenseIndex block) { return range(block, block+1); }

  /** Access a const block view */
  const constBlock operator()(Index block) const { return range(block, block+1); }
  const constBlock operator()(DenseIndex block) const { return range(block, block+1); }

  /** access ranges of blocks at a time */
  Block range(Index startBlock, Index endBlock) {
  Block range(DenseIndex startBlock, DenseIndex endBlock) {
    assertInvariants();
    Index actualStartBlock = startBlock + blockStart_;
    Index actualEndBlock = endBlock + blockStart_;
    DenseIndex actualStartBlock = startBlock + blockStart_;
    DenseIndex actualEndBlock = endBlock + blockStart_;
    if(startBlock != 0 || endBlock != 0) {
      checkBlock(actualStartBlock);
      assert(actualEndBlock < (DenseIndex)variableColOffsets_.size());
    }
    const Index startCol = variableColOffsets_[actualStartBlock];
    const Index rangeCols = variableColOffsets_[actualEndBlock] - startCol;
    const DenseIndex startCol = variableColOffsets_[actualStartBlock];
    const DenseIndex rangeCols = variableColOffsets_[actualEndBlock] - startCol;
    return matrix_.block(rowStart_, startCol, this->rows(), rangeCols);
  }

  const constBlock range(Index startBlock, Index endBlock) const {
  const constBlock range(DenseIndex startBlock, DenseIndex endBlock) const {
    assertInvariants();
    Index actualStartBlock = startBlock + blockStart_;
    Index actualEndBlock = endBlock + blockStart_;
    DenseIndex actualStartBlock = startBlock + blockStart_;
    DenseIndex actualEndBlock = endBlock + blockStart_;
    if(startBlock != 0 || endBlock != 0) {
      checkBlock(actualStartBlock);
      assert(actualEndBlock < (DenseIndex)variableColOffsets_.size());
    }
    const Index startCol = variableColOffsets_[actualStartBlock];
    const Index rangeCols = variableColOffsets_[actualEndBlock] - startCol;
    const DenseIndex startCol = variableColOffsets_[actualStartBlock];
    const DenseIndex rangeCols = variableColOffsets_[actualEndBlock] - startCol;
    return ((const Matrix&)matrix_).block(rowStart_, startCol, this->rows(), rangeCols);
  }

@@ -148,30 +142,30 @@ namespace gtsam {
  /** Return the full matrix, *not* including any portions excluded by rowStart(), rowEnd(), and firstBlock() */
  const constBlock full() const { return range(0, nBlocks()); }

  Index offset(Index block) const {
  DenseIndex offset(DenseIndex block) const {
    assertInvariants();
    Index actualBlock = block + blockStart_;
    DenseIndex actualBlock = block + blockStart_;
    checkBlock(actualBlock);
    return variableColOffsets_[actualBlock];
  }

  /** Get or set the apparent first row of the underlying matrix for all operations */
  Index& rowStart() { return rowStart_; }
  DenseIndex& rowStart() { return rowStart_; }

  /** Get or set the apparent last row (exclusive, i.e. rows() == rowEnd() - rowStart()) of the underlying matrix for all operations */
  Index& rowEnd() { return rowEnd_; }
  DenseIndex& rowEnd() { return rowEnd_; }

  /** Get or set the apparent first block for all operations */
  Index& firstBlock() { return blockStart_; }
  DenseIndex& firstBlock() { return blockStart_; }

  /** Get the apparent first row of the underlying matrix for all operations */
  Index rowStart() const { return rowStart_; }
  DenseIndex rowStart() const { return rowStart_; }

  /** Get the apparent last row (exclusive, i.e. rows() == rowEnd() - rowStart()) of the underlying matrix for all operations */
  Index rowEnd() const { return rowEnd_; }
  DenseIndex rowEnd() const { return rowEnd_; }

  /** Get the apparent first block for all operations */
  Index firstBlock() const { return blockStart_; }
  DenseIndex firstBlock() const { return blockStart_; }

  /** Access to full matrix (*including* any portions excluded by rowStart(), rowEnd(), and firstBlock()) */
  const Matrix& matrix() const { return matrix_; }

@@ -179,14 +173,11 @@ namespace gtsam {
  /** Non-const access to full matrix (*including* any portions excluded by rowStart(), rowEnd(), and firstBlock()) */
  Matrix& matrix() { return matrix_; }

  /**
   * Copy the block structure and resize the underlying matrix, but do not
   * copy the matrix data. If blockStart(), rowStart(), and/or rowEnd() have
   * been modified, this copies the structure of the corresponding matrix view.
   * In the destination VerticalBlockView, blockStart() and rowStart() will
   * thus be 0, rowEnd() will be cols() of the source VerticalBlockView, and
   * the underlying matrix will be the size of the view of the source matrix.
   */
  /** Copy the block structure and resize the underlying matrix, but do not copy the matrix data.
   * If blockStart(), rowStart(), and/or rowEnd() have been modified, this copies the structure of
   * the corresponding matrix view. In the destination VerticalBlockView, blockStart() and
   * rowStart() will thus be 0, rowEnd() will be cols() of the source VerticalBlockView, and the
   * underlying matrix will be the size of the view of the source matrix. */
  static VerticalBlockMatrix LikeActiveViewOf(const VerticalBlockMatrix& rhs);

protected:

@@ -208,14 +199,14 @@ namespace gtsam {
  void fillOffsets(ITERATOR firstBlockDim, ITERATOR lastBlockDim) {
    variableColOffsets_.resize((lastBlockDim-firstBlockDim)+1);
    variableColOffsets_[0] = 0;
    Index j=0;
    DenseIndex j=0;
    for(ITERATOR dim=firstBlockDim; dim!=lastBlockDim; ++dim) {
      variableColOffsets_[j+1] = variableColOffsets_[j] + *dim;
      ++ j;
    }
  }

  //friend class SymmetricBlockView;
  friend class SymmetricBlockMatrix;

private:
  /** Serialization function */
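A hypothetical sketch (not part of the diff) of the three "apparent view" parameters described in the class comment above; the names and values are illustrative only.

#include <gtsam/base/VerticalBlockMatrix.h>
#include <vector>

void verticalBlockMatrixExample() {
  using namespace gtsam;
  std::vector<DenseIndex> dims;
  dims.push_back(2);
  dims.push_back(3);                 // two column blocks of widths 2 and 3
  VerticalBlockMatrix Ab(dims, 4);   // 4 rows, 5 columns in total
  Ab.matrix().setZero();
  Ab(0) = Matrix::Ones(4, 2);        // first block
  Ab(1) = 2.0 * Matrix::Ones(4, 3);  // second block
  Ab.rowStart() = 2;                 // apparent view: rows() == rowEnd() - rowStart() == 2
  Ab.firstBlock() = 1;               // block 0 now refers to the former block 1
  Matrix view = Ab.full();           // materializes the 2x3 apparent view, no reallocation of Ab
}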
@@ -67,7 +67,7 @@ namespace gtsam {
   * \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
   * GaussianFactorGraph::jacobian and GaussianFactorGraph::sparseJacobian.
   */
  virtual Matrix augmentedJacobian(bool weight = true) const = 0;
  virtual Matrix augmentedJacobian() const = 0;

  /**
   * Return the dense Jacobian \f$ A \f$ and right-hand-side \f$ b \f$,

@@ -76,7 +76,7 @@ namespace gtsam {
   * GaussianFactorGraph::augmentedJacobian and
   * GaussianFactorGraph::sparseJacobian.
   */
  virtual std::pair<Matrix,Vector> jacobian(bool weight = true) const = 0;
  virtual std::pair<Matrix,Vector> jacobian() const = 0;

  /** Return the augmented information matrix represented by this GaussianFactor.
   * The augmented information matrix contains the information matrix with an

@@ -104,7 +104,7 @@ namespace gtsam {
   * stored in this factor.
   * @return a HessianFactor with negated Hessian matrices
   */
  //virtual GaussianFactor::shared_ptr negate() const = 0;
  virtual GaussianFactor::shared_ptr negate() const = 0;

private:
  /** Serialization function */
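A short sketch (an assumption on my part, not from the diff) of how callers look after the weight flag is removed from these two virtuals: they simply use the no-argument overloads.

#include <gtsam/linear/GaussianFactor.h>
#include <utility>

void denseJacobianAccess(const gtsam::GaussianFactor& factor) {
  gtsam::Matrix Ab = factor.augmentedJacobian();                        // dense [A b]
  std::pair<gtsam::Matrix, gtsam::Vector> Ab_pair = factor.jacobian();  // (A, b) separately
}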
@@ -0,0 +1,539 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file HessianFactor.cpp
 * @author Richard Roberts
 * @date Dec 8, 2010
 */

#include <gtsam/base/debug.h>
#include <gtsam/base/timing.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/base/cholesky.h>
#include <gtsam/linear/linearExceptions.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/GaussianFactorGraph.h>

#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <boost/make_shared.hpp>
#include <boost/tuple/tuple.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
#include <boost/bind.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <boost/assign/list_of.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/join.hpp>

#include <sstream>

using namespace std;
using namespace boost::assign;
using boost::adaptors::transformed;
namespace br = boost::range;

namespace gtsam {

/* ************************************************************************* */
string SlotEntry::toString() const {
  ostringstream oss;
  oss << "SlotEntry: slot=" << slot << ", dim=" << dimension;
  return oss.str();
}

/* ************************************************************************* */
Scatter::Scatter(const GaussianFactorGraph& gfg) {
  // First do the set union.
  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, gfg) {
    if(factor) {
      for(GaussianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) {
        this->insert(make_pair(*variable, SlotEntry(0, factor->getDim(variable))));
      }
    }
  }

  // Next fill in the slot indices (we can only get these after doing the set
  // union.
  size_t slot = 0;
  BOOST_FOREACH(value_type& var_slot, *this) {
    var_slot.second.slot = (slot ++);
  }
}

/* ************************************************************************* */
HessianFactor::HessianFactor() :
  info_(cref_list_of<1>(1))
{
  linearTerm().setZero();
  constantTerm() = 0.0;
}

/* ************************************************************************* */
HessianFactor::HessianFactor(Key j, const Matrix& G, const Vector& g, double f) :
  GaussianFactor(cref_list_of<1>(j)), info_(cref_list_of<2>(G.cols())(1))
{
  if(G.rows() != G.cols() || G.rows() != g.size()) throw invalid_argument(
    "Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions");
  info_(0,0) = G;
  info_(0,1) = g;
  info_(1,1)(0,0) = f;
}

/* ************************************************************************* */
// error is 0.5*(x-mu)'*inv(Sigma)*(x-mu) = 0.5*(x'*G*x - 2*x'*G*mu + mu'*G*mu)
// where G = inv(Sigma), g = G*mu, f = mu'*G*mu = mu'*g
HessianFactor::HessianFactor(Key j, const Vector& mu, const Matrix& Sigma) :
  GaussianFactor(cref_list_of<1>(j)),
  info_(cref_list_of<2> (Sigma.cols()) (1) )
{
  if (Sigma.rows() != Sigma.cols() || Sigma.rows() != mu.size()) throw invalid_argument(
    "Attempting to construct HessianFactor with inconsistent matrix and/or vector dimensions");
  info_(0,0) = Sigma.inverse(); // G
  info_(0,1) = info_(0,0) * mu; // g
  info_(1,1)(0,0) = mu.dot(info_(0,1).col(0)); // f
}

/* ************************************************************************* */
HessianFactor::HessianFactor(Key j1, Key j2,
  const Matrix& G11, const Matrix& G12, const Vector& g1,
  const Matrix& G22, const Vector& g2, double f) :
  GaussianFactor(cref_list_of<2>(j1)(j2)),
  info_(cref_list_of<3> (G11.cols()) (G22.cols()) (1) )
{
  info_(0,0) = G11;
  info_(0,1) = G12;
  info_(0,2) = g1;
  info_(1,1) = G22;
  info_(1,2) = g2;
  info_(2,2)(0,0) = f;
}

/* ************************************************************************* */
HessianFactor::HessianFactor(Key j1, Key j2, Key j3,
  const Matrix& G11, const Matrix& G12, const Matrix& G13, const Vector& g1,
  const Matrix& G22, const Matrix& G23, const Vector& g2,
  const Matrix& G33, const Vector& g3, double f) :
  GaussianFactor(cref_list_of<3>(j1)(j2)(j3)),
  info_(cref_list_of<4> (G11.cols()) (G22.cols()) (G33.cols()) (1) )
{
  if(G11.rows() != G11.cols() || G11.rows() != G12.rows() || G11.rows() != G13.rows() || G11.rows() != g1.size() ||
    G22.cols() != G12.cols() || G33.cols() != G13.cols() || G22.cols() != g2.size() || G33.cols() != g3.size())
    throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
  info_(0,0) = G11;
  info_(0,1) = G12;
  info_(0,2) = G13;
  info_(0,3) = g1;
  info_(1,1) = G22;
  info_(1,2) = G23;
  info_(1,3) = g2;
  info_(2,2) = G33;
  info_(2,3) = g3;
  info_(3,3)(0,0) = f;
}

/* ************************************************************************* */
namespace { DenseIndex _getColsHF(const Matrix& m) { return m.cols(); } }

/* ************************************************************************* */
HessianFactor::HessianFactor(const std::vector<Key>& js, const std::vector<Matrix>& Gs,
  const std::vector<Vector>& gs, double f) :
  GaussianFactor(js), info_(br::join(Gs | transformed(&_getColsHF), cref_list_of<1,DenseIndex>(1)))
{
  // Get the number of variables
  size_t variable_count = js.size();

  // Verify the provided number of entries in the vectors are consistent
  if(gs.size() != variable_count || Gs.size() != (variable_count*(variable_count+1))/2)
    throw invalid_argument("Inconsistent number of entries between js, Gs, and gs in HessianFactor constructor.\nThe number of keys provided \
in js must match the number of linear vector pieces in gs. The number of upper-diagonal blocks in Gs must be n*(n+1)/2");

  // Verify the dimensions of each provided matrix are consistent
  // Note: equations for calculating the indices derived from the "sum of an arithmetic sequence" formula
  for(size_t i = 0; i < variable_count; ++i){
    DenseIndex block_size = gs[i].size();
    // Check rows
    for(size_t j = 0; j < variable_count-i; ++j){
      size_t index = i*(2*variable_count - i + 1)/2 + j;
      if(Gs[index].rows() != block_size){
        throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
      }
    }
    // Check cols
    for(size_t j = 0; j <= i; ++j){
      size_t index = j*(2*variable_count - j + 1)/2 + (i-j);
      if(Gs[index].cols() != block_size){
        throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
      }
    }
  }

  // Fill in the blocks
  size_t index = 0;
  for(size_t i = 0; i < variable_count; ++i){
    for(size_t j = i; j < variable_count; ++j){
      info_(i, j) = Gs[index++];
    }
    info_(i, variable_count) = gs[i];
  }
  info_(variable_count, variable_count)(0,0) = f;
}

/* ************************************************************************* */
HessianFactor::HessianFactor(const JacobianFactor& jf)
{
  throw std::runtime_error("Not implemented");
}

/* ************************************************************************* */
HessianFactor::HessianFactor(const GaussianFactor& gf)
{
  throw std::runtime_error("Not implemented");
  //// Copy the variable indices
  //(GaussianFactorOrdered&)(*this) = gf;
  //// Copy the matrix data depending on what type of factor we're copying from
  //if(dynamic_cast<const JacobianFactorOrdered*>(&gf)) {
  //  const JacobianFactorOrdered& jf(static_cast<const JacobianFactorOrdered&>(gf));
  //  if(jf.model_->isConstrained())
  //    throw invalid_argument("Cannot construct HessianFactor from JacobianFactor with constrained noise model");
  //  else {
  //    Vector invsigmas = jf.model_->invsigmas().cwiseProduct(jf.model_->invsigmas());
  //    info_.copyStructureFrom(jf.Ab_);
  //    BlockInfo::constBlock A = jf.Ab_.full();
  //    matrix_.noalias() = A.transpose() * invsigmas.asDiagonal() * A;
  //  }
  //} else if(dynamic_cast<const HessianFactorOrdered*>(&gf)) {
  //  const HessianFactorOrdered& hf(static_cast<const HessianFactorOrdered&>(gf));
  //  info_.assignNoalias(hf.info_);
  //} else
  //  throw std::invalid_argument("In HessianFactor(const GaussianFactor& gf), gf is neither a JacobianFactor nor a HessianFactor");
  //assertInvariants();
}

/* ************************************************************************* */
HessianFactor::HessianFactor(const GaussianFactorGraph& factors)
{
  throw std::runtime_error("Not implemented");

  //Scatter scatter(factors);

  //// Pull out keys and dimensions
  //gttic(keys);
  //vector<size_t> dimensions(scatter.size() + 1);
  //BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) {
  //  dimensions[var_slot.second.slot] = var_slot.second.dimension;
  //}
  //// This is for the r.h.s. vector
  //dimensions.back() = 1;
  //gttoc(keys);

  //const bool debug = ISDEBUG("EliminateCholesky");
  //// Form Ab' * Ab
  //gttic(allocate);
  //info_.resize(dimensions.begin(), dimensions.end(), false);
  //// Fill in keys
  //keys_.resize(scatter.size());
  //std::transform(scatter.begin(), scatter.end(), keys_.begin(), boost::bind(&Scatter::value_type::first, ::_1));
  //gttoc(allocate);
  //gttic(zero);
  //matrix_.noalias() = Matrix::Zero(matrix_.rows(),matrix_.cols());
  //gttoc(zero);
  //gttic(update);
  //if (debug) cout << "Combining " << factors.size() << " factors" << endl;
  //BOOST_FOREACH(const GaussianFactorOrdered::shared_ptr& factor, factors)
  //{
  //  if(factor) {
  //    if(shared_ptr hessian = boost::dynamic_pointer_cast<HessianFactorOrdered>(factor))
  //      updateATA(*hessian, scatter);
  //    else if(JacobianFactorOrdered::shared_ptr jacobianFactor = boost::dynamic_pointer_cast<JacobianFactorOrdered>(factor))
  //      updateATA(*jacobianFactor, scatter);
  //    else
  //      throw invalid_argument("GaussianFactor is neither Hessian nor Jacobian");
  //  }
  //}
  //gttoc(update);

  //if (debug) gtsam::print(matrix_, "Ab' * Ab: ");

  //assertInvariants();
}

/* ************************************************************************* */
void HessianFactor::print(const std::string& s, const KeyFormatter& formatter) const {
  cout << s << "\n";
  cout << " keys: ";
  for(const_iterator key=this->begin(); key!=this->end(); ++key)
    cout << formatter(*key) << "(" << this->getDim(key) << ") ";
  cout << "\n";
  gtsam::print(Matrix(info_.range(0,info_.nBlocks(), 0,info_.nBlocks()).selfadjointView<Eigen::Upper>()), "Ab^T * Ab: ");
}

/* ************************************************************************* */
bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
  if(!dynamic_cast<const HessianFactor*>(&lf))
    return false;
  else {
    Matrix thisMatrix = this->info_.full().selfadjointView<Eigen::Upper>();
    thisMatrix(thisMatrix.rows()-1, thisMatrix.cols()-1) = 0.0;
    Matrix rhsMatrix = static_cast<const HessianFactor&>(lf).info_.full().selfadjointView<Eigen::Upper>();
    rhsMatrix(rhsMatrix.rows()-1, rhsMatrix.cols()-1) = 0.0;
    return equal_with_abs_tol(thisMatrix, rhsMatrix, tol);
  }
}

/* ************************************************************************* */
Matrix HessianFactor::augmentedInformation() const {
  return info_.full().selfadjointView<Eigen::Upper>();
}

/* ************************************************************************* */
Matrix HessianFactor::information() const {
  return info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>();
}

/* ************************************************************************* */
Matrix HessianFactor::augmentedJacobian() const
{
  throw std::runtime_error("Not implemented");
}

/* ************************************************************************* */
std::pair<Matrix, Vector> HessianFactor::jacobian() const
{
  throw std::runtime_error("Not implemented");
}

/* ************************************************************************* */
double HessianFactor::error(const VectorValues& c) const {
  // error 0.5*(f - 2*x'*g + x'*G*x)
  const double f = constantTerm();
  double xtg = 0, xGx = 0;
  // extract the relevant subset of the VectorValues
  // NOTE may not be as efficient
  const Vector x = c.vector(this->keys());
  xtg = x.dot(linearTerm());
  xGx = x.transpose() * info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>() * x;
  return 0.5 * (f - 2.0 * xtg + xGx);
}

/* ************************************************************************* */
void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatter)
{

  throw std::runtime_error("Not implemented");

  //// This function updates 'combined' with the information in 'update'.
  //// 'scatter' maps variables in the update factor to slots in the combined
  //// factor.

  //const bool debug = ISDEBUG("updateATA");

  //// First build an array of slots
  //gttic(slots);
  //size_t* slots = (size_t*)alloca(sizeof(size_t)*update.size()); // FIXME: alloca is bad, just ask Google.
  //size_t slot = 0;
  //BOOST_FOREACH(Index j, update) {
  //  slots[slot] = scatter.find(j)->second.slot;
  //  ++ slot;
  //}
  //gttoc(slots);

  //if(debug) {
  //  this->print("Updating this: ");
  //  update.print("with (Hessian): ");
  //}

  //// Apply updates to the upper triangle
  //gttic(update);
  //for(size_t j2=0; j2<update.info_.nBlocks(); ++j2) {
  //  size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
  //  for(size_t j1=0; j1<=j2; ++j1) {
  //    size_t slot1 = (j1 == update.size()) ? this->info_.nBlocks()-1 : slots[j1];
  //    if(slot2 > slot1) {
  //      if(debug)
  //        cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
  //      matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).noalias() +=
  //        update.matrix_.block(update.info_.offset(j1), update.info_.offset(j2), update.info_(j1,j2).rows(), update.info_(j1,j2).cols());
  //    } else if(slot1 > slot2) {
  //      if(debug)
  //        cout << "Updating (" << slot2 << "," << slot1 << ") from (" << j1 << "," << j2 << ")" << endl;
  //      matrix_.block(info_.offset(slot2), info_.offset(slot1), info_(slot2,slot1).rows(), info_(slot2,slot1).cols()).noalias() +=
  //        update.matrix_.block(update.info_.offset(j1), update.info_.offset(j2), update.info_(j1,j2).rows(), update.info_(j1,j2).cols()).transpose();
  //    } else {
  //      if(debug)
  //        cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
  //      matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).triangularView<Eigen::Upper>() +=
  //        update.matrix_.block(update.info_.offset(j1), update.info_.offset(j2), update.info_(j1,j2).rows(), update.info_(j1,j2).cols());
  //    }
  //    if(debug) cout << "Updating block " << slot1 << "," << slot2 << " from block " << j1 << "," << j2 << "\n";
  //    if(debug) this->print();
  //  }
  //}
  //gttoc(update);
}

/* ************************************************************************* */
void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatter)
{
  throw std::runtime_error("Not implemented");

  //// This function updates 'combined' with the information in 'update'.
  //// 'scatter' maps variables in the update factor to slots in the combined
  //// factor.

  //const bool debug = ISDEBUG("updateATA");

  //// First build an array of slots
  //gttic(slots);
  //size_t* slots = (size_t*)alloca(sizeof(size_t)*update.size()); // FIXME: alloca is bad, just ask Google.
  //size_t slot = 0;
  //BOOST_FOREACH(Index j, update) {
  //  slots[slot] = scatter.find(j)->second.slot;
  //  ++ slot;
  //}
  //gttoc(slots);

  //gttic(form_ATA);
  //if(update.model_->isConstrained())
  //  throw invalid_argument("Cannot update HessianFactor from JacobianFactor with constrained noise model");

  //if(debug) {
  //  this->print("Updating this: ");
  //  update.print("with (Jacobian): ");
  //}

  //typedef Eigen::Block<const JacobianFactorOrdered::AbMatrix> BlockUpdateMatrix;
  //BlockUpdateMatrix updateA(update.matrix_.block(
  //  update.Ab_.rowStart(),update.Ab_.offset(0), update.Ab_.full().rows(), update.Ab_.full().cols()));
  //if (debug) cout << "updateA: \n" << updateA << endl;

  //Matrix updateInform;
  //if(boost::dynamic_pointer_cast<noiseModel::Unit>(update.model_)) {
  //  updateInform.noalias() = updateA.transpose() * updateA;
  //} else {
  //  noiseModel::Diagonal::shared_ptr diagonal(boost::dynamic_pointer_cast<noiseModel::Diagonal>(update.model_));
  //  if(diagonal) {
  //    Vector invsigmas2 = update.model_->invsigmas().cwiseProduct(update.model_->invsigmas());
  //    updateInform.noalias() = updateA.transpose() * invsigmas2.asDiagonal() * updateA;
  //  } else
  //    throw invalid_argument("In HessianFactor::updateATA, JacobianFactor noise model is neither Unit nor Diagonal");
  //}
  //if (debug) cout << "updateInform: \n" << updateInform << endl;
  //gttoc(form_ATA);

  //// Apply updates to the upper triangle
  //gttic(update);
  //for(size_t j2=0; j2<update.Ab_.nBlocks(); ++j2) {
  //  size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
  //  for(size_t j1=0; j1<=j2; ++j1) {
  //    size_t slot1 = (j1 == update.size()) ? this->info_.nBlocks()-1 : slots[j1];
  //    size_t off0 = update.Ab_.offset(0);
  //    if(slot2 > slot1) {
  //      if(debug)
  //        cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
  //      matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).noalias() +=
  //        updateInform.block(update.Ab_.offset(j1)-off0, update.Ab_.offset(j2)-off0, update.Ab_(j1).cols(), update.Ab_(j2).cols());
  //    } else if(slot1 > slot2) {
  //      if(debug)
  //        cout << "Updating (" << slot2 << "," << slot1 << ") from (" << j1 << "," << j2 << ")" << endl;
  //      matrix_.block(info_.offset(slot2), info_.offset(slot1), info_(slot2,slot1).rows(), info_(slot2,slot1).cols()).noalias() +=
  //        updateInform.block(update.Ab_.offset(j1)-off0, update.Ab_.offset(j2)-off0, update.Ab_(j1).cols(), update.Ab_(j2).cols()).transpose();
  //    } else {
  //      if(debug)
  //        cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
  //      matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).triangularView<Eigen::Upper>() +=
  //        updateInform.block(update.Ab_.offset(j1)-off0, update.Ab_.offset(j2)-off0, update.Ab_(j1).cols(), update.Ab_(j2).cols());
  //    }
  //    if(debug) cout << "Updating block " << slot1 << "," << slot2 << " from block " << j1 << "," << j2 << "\n";
  //    if(debug) this->print();
  //  }
  //}
  //gttoc(update);
}

/* ************************************************************************* */
void HessianFactor::partialCholesky(size_t nrFrontals)
{
  throw std::runtime_error("Not implemented");

  //if(!choleskyPartial(matrix_, info_.offset(nrFrontals)))
  //  throw IndeterminantLinearSystemException(this->keys().front());
}

/* ************************************************************************* */
GaussianConditional::shared_ptr HessianFactor::splitEliminatedFactor(size_t nrFrontals)
{
  throw std::runtime_error("Not implemented");

  //static const bool debug = false;

  //// Extract conditionals
  //gttic(extract_conditionals);
  //GaussianConditionalOrdered::shared_ptr conditional(new GaussianConditionalOrdered());
  //typedef VerticalBlockView<Matrix> BlockAb;
  //BlockAb Ab(matrix_, info_);

  //size_t varDim = info_.offset(nrFrontals);
  //Ab.rowEnd() = Ab.rowStart() + varDim;

  //// Create one big conditionals with many frontal variables.
  //gttic(construct_cond);
  //Vector sigmas = Vector::Ones(varDim);
  //conditional = boost::make_shared<ConditionalType>(keys_.begin(), keys_.end(), nrFrontals, Ab, sigmas);
  //gttoc(construct_cond);
  //if(debug) conditional->print("Extracted conditional: ");

  //gttoc(extract_conditionals);

  //// Take lower-right block of Ab_ to get the new factor
  //gttic(remaining_factor);
  //info_.blockStart() = nrFrontals;
  //// Assign the keys
  //vector<Index> remainingKeys(keys_.size() - nrFrontals);
  //remainingKeys.assign(keys_.begin() + nrFrontals, keys_.end());
  //keys_.swap(remainingKeys);
  //gttoc(remaining_factor);

  //return conditional;
}

/* ************************************************************************* */
GaussianFactor::shared_ptr HessianFactor::negate() const
{
  throw std::runtime_error("Not implemented");

  //// Copy Hessian Blocks from Hessian factor and invert
  //std::vector<Index> js;
  //std::vector<Matrix> Gs;
  //std::vector<Vector> gs;
  //double f;
  //js.insert(js.end(), begin(), end());
  //for(size_t i = 0; i < js.size(); ++i){
  //  for(size_t j = i; j < js.size(); ++j){
  //    Gs.push_back( -info(begin()+i, begin()+j) );
  //  }
  //  gs.push_back( -linearTerm(begin()+i) );
  //}
  //f = -constantTerm();

  //// Create the Anti-Hessian Factor from the negated blocks
  //return HessianFactorOrdered::shared_ptr(new HessianFactorOrdered(js, Gs, gs, f));
}

} // gtsam
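An illustration (not part of the diff) of the ordering the n-way constructor above expects for Gs, and of its "sum of an arithmetic sequence" index formula.

#include <cassert>
#include <cstddef>

void hessianBlockOrderingExample() {
  const std::size_t n = 3;  // three variables => n*(n+1)/2 = 6 upper-triangle blocks:
                            // G11 G12 G13 G22 G23 G33, i.e. (0,0) (0,1) (0,2) (1,1) (1,2) (2,2)
  std::size_t index = 0;
  for (std::size_t i = 0; i < n; ++i)
    for (std::size_t j = 0; j < n - i; ++j) {
      assert(index == i * (2 * n - i + 1) / 2 + j);  // same formula as the dimension checks
      ++index;
    }
  assert(index == n * (n + 1) / 2);
}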
@ -0,0 +1,402 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file HessianFactor.h
|
||||
* @brief Contains the HessianFactor class, a general quadratic factor
|
||||
* @author Richard Roberts
|
||||
* @date Dec 8, 2010
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/SymmetricBlockMatrix.h>
|
||||
#include <gtsam/linear/GaussianFactor.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
// Forward declarations for friend unit tests
|
||||
class HessianFactorConversionConstructorTest;
|
||||
class HessianFactorConstructor1Test;
|
||||
class HessianFactorConstructor1bTest;
|
||||
class HessianFactorcombineTest;
|
||||
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
// Forward declarations
|
||||
class JacobianFactor;
|
||||
class GaussianConditional;
|
||||
class GaussianBayesNet;
|
||||
|
||||
// Definition of Scatter, which is an intermediate data structure used when
|
||||
// building a HessianFactor incrementally, to get the keys in the right
|
||||
// order.
|
||||
// The "scatter" is a map from global variable indices to slot indices in the
|
||||
// union of involved variables. We also include the dimensionality of the
|
||||
// variable.
|
||||
struct GTSAM_EXPORT SlotEntry {
|
||||
size_t slot;
|
||||
size_t dimension;
|
||||
SlotEntry(size_t _slot, size_t _dimension)
|
||||
: slot(_slot), dimension(_dimension) {}
|
||||
std::string toString() const;
|
||||
};
|
||||
class Scatter : public FastMap<Index, SlotEntry> {
|
||||
public:
|
||||
Scatter() {}
|
||||
Scatter(const GaussianFactorGraph& gfg);
|
||||
};
|
||||
|
||||
/**
|
||||
* @brief A Gaussian factor using the canonical parameters (information form)
|
||||
*
|
||||
* HessianFactor implements a general quadratic factor of the form
|
||||
* \f[ E(x) = 0.5 x^T G x - x^T g + 0.5 f \f]
|
||||
* that stores the matrix \f$ G \f$, the vector \f$ g \f$, and the constant term \f$ f \f$.
|
||||
*
|
||||
* When \f$ G \f$ is positive semidefinite, this factor represents a Gaussian,
|
||||
* in which case \f$ G \f$ is the information matrix \f$ \Lambda \f$,
|
||||
* \f$ g \f$ is the information vector \f$ \eta \f$, and \f$ f \f$ is the residual
|
||||
* sum-square-error at the mean, when \f$ x = \mu \f$.
|
||||
*
|
||||
* Indeed, the negative log-likelihood of a Gaussian is (up to a constant)
|
||||
* @f$ E(x) = 0.5(x-\mu)^T P^{-1} (x-\mu) @f$
|
||||
* with @f$ \mu @f$ the mean and @f$ P @f$ the covariance matrix. Expanding the product we get
|
||||
* @f[
|
||||
* E(x) = 0.5 x^T P^{-1} x - x^T P^{-1} \mu + 0.5 \mu^T P^{-1} \mu
|
||||
* @f]
|
||||
* We define the Information matrix (or Hessian) @f$ \Lambda = P^{-1} @f$
|
||||
* and the information vector @f$ \eta = P^{-1} \mu = \Lambda \mu @f$
|
||||
* to arrive at the canonical form of the Gaussian:
|
||||
* @f[
|
||||
* E(x) = 0.5 x^T \Lambda x - x^T \eta + 0.5 \mu^T \Lambda \mu
|
||||
* @f]
|
||||
*
|
||||
* This factor is one of the factors that can be in a GaussianFactorGraph.
|
||||
* It may be returned from NonlinearFactor::linearize(), but is also
|
||||
* used internally to store the Hessian during Cholesky elimination.
|
||||
*
|
||||
* This can represent a quadratic factor with characteristics that cannot be
|
||||
* represented using a JacobianFactor (which has the form
|
||||
* \f$ E(x) = \Vert Ax - b \Vert^2 \f$ and stores the Jacobian \f$ A \f$
|
||||
* and error vector \f$ b \f$, i.e. is a sum-of-squares factor). For example,
|
||||
* a HessianFactor need not be positive semidefinite, it can be indefinite or
|
||||
* even negative semidefinite.
|
||||
*
|
||||
* If a HessianFactor is indefinite or negative semi-definite, then in order
|
||||
* for solving the linear system to be possible,
|
||||
* the Hessian of the full system must be positive definite (i.e. when all
|
||||
* small Hessians are combined, the result must be positive definite). If
|
||||
* this is not the case, an error will occur during elimination.
|
||||
*
|
||||
* This class stores G, g, and f as an augmented matrix HessianFactor::matrix_.
|
||||
* The upper-left n x n blocks of HessianFactor::matrix_ store the upper-right
|
||||
* triangle of G, the upper-right-most column of length n of HessianFactor::matrix_
|
||||
* stores g, and the lower-right entry of HessianFactor::matrix_ stores f, i.e.
|
||||
* \code
|
||||
HessianFactor::matrix_ = [ G11 G12 G13 ... g1
|
||||
0 G22 G23 ... g2
|
||||
0 0 G33 ... g3
|
||||
: : : :
|
||||
0 0 0 ... f ]
|
||||
\endcode
|
||||
Blocks can be accessed as follows:
|
||||
\code
|
||||
G11 = info(begin(), begin());
|
||||
G12 = info(begin(), begin()+1);
|
||||
G23 = info(begin()+1, begin()+2);
|
||||
g2 = linearTerm(begin()+1);
|
||||
f = constantTerm();
|
||||
.......
|
||||
\endcode
|
||||
*/
|
||||
class GTSAM_EXPORT HessianFactor : public GaussianFactor {
|
||||
protected:
|
||||
|
||||
SymmetricBlockMatrix info_; ///< The full augmented information matrix, s.t. the quadratic error is 0.5*[x -1]'*H*[x -1]
|
||||
|
||||
public:
|
||||
|
||||
typedef GaussianFactor Base; ///< Typedef to base class
|
||||
typedef HessianFactor This; ///< Typedef to this class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< A shared_ptr to this class
|
||||
typedef SymmetricBlockMatrix::Block Block; ///< A block from the Hessian matrix
|
||||
typedef SymmetricBlockMatrix::constBlock constBlock; ///< A block from the Hessian matrix (const version)
|
||||
|
||||
/** default constructor for I/O */
|
||||
HessianFactor();
|
||||
|
||||
/** Construct a unary factor. G is the quadratic term (Hessian matrix), g
|
||||
* the linear term (a vector), and f the constant term. The quadratic
|
||||
* error is:
|
||||
* 0.5*(f - 2*x'*g + x'*G*x)
|
||||
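*
* A minimal usage sketch (the numbers simply mirror the unit tests):
\code
Matrix G = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);  // upper triangle of the quadratic term
Vector g = Vector_(2, -8.0, -9.0);            // linear term
double f = 10.0;                              // constant term
HessianFactor factor(0, G, g, f);             // unary factor on variable with index 0
\endcode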
*/
|
||||
HessianFactor(Index j, const Matrix& G, const Vector& g, double f);
|
||||
|
||||
/** Construct a unary factor, given a mean and covariance matrix.
|
||||
* error is 0.5*(x-mu)'*inv(Sigma)*(x-mu)
|
||||
*/
|
||||
HessianFactor(Index j, const Vector& mu, const Matrix& Sigma);
|
||||
|
||||
/** Construct a binary factor. Gxx are the upper-triangle blocks of the
|
||||
* quadratic term (the Hessian matrix), gx the pieces of the linear vector
|
||||
* term, and f the constant term.
|
||||
* JacobianFactor error is \f[ 0.5* (Ax-b)' M (Ax-b) = 0.5*x'A'MAx - x'A'Mb + 0.5*b'Mb \f]
|
||||
* HessianFactor error is \f[ 0.5*(x'Gx - 2x'g + f) = 0.5*x'Gx - x'*g + 0.5*f \f]
|
||||
* So, with \f$ A = [A1 A2] \f$ and \f$ G=A'*M*A = [A1';A2']*M*[A1 A2] \f$ we have
|
||||
\code
|
||||
n1*n1 G11 = A1'*M*A1
|
||||
n1*n2 G12 = A1'*M*A2
|
||||
n2*n2 G22 = A2'*M*A2
|
||||
n1*1 g1 = A1'*M*b
|
||||
n2*1 g2 = A2'*M*b
|
||||
1*1 f = b'*M*b
|
||||
\endcode
|
||||
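*
* For instance, with unit noise (\f$ M = I \f$) these blocks can be formed directly from
* Jacobian blocks \c A1, \c A2 and right-hand side \c b (illustrative sketch only):
\code
Matrix G11 = A1.transpose() * A1, G12 = A1.transpose() * A2, G22 = A2.transpose() * A2;
Vector g1 = A1.transpose() * b, g2 = A2.transpose() * b;
double f = b.dot(b);
HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f);
\endcode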
*/
|
||||
HessianFactor(Index j1, Index j2,
|
||||
const Matrix& G11, const Matrix& G12, const Vector& g1,
|
||||
const Matrix& G22, const Vector& g2, double f);
|
||||
|
||||
/** Construct a ternary factor. Gxx are the upper-triangle blocks of the
|
||||
* quadratic term (the Hessian matrix), gx the pieces of the linear vector
|
||||
* term, and f the constant term.
|
||||
*/
|
||||
HessianFactor(Index j1, Index j2, Index j3,
|
||||
const Matrix& G11, const Matrix& G12, const Matrix& G13, const Vector& g1,
|
||||
const Matrix& G22, const Matrix& G23, const Vector& g2,
|
||||
const Matrix& G33, const Vector& g3, double f);
|
||||
|
||||
/** Construct an n-way factor. Gs contains the upper-triangle blocks of the
|
||||
* quadratic term (the Hessian matrix) provided in row-order, gs the pieces
|
||||
* of the linear vector term, and f the constant term.
|
||||
*/
|
||||
HessianFactor(const std::vector<Key>& js, const std::vector<Matrix>& Gs,
|
||||
const std::vector<Vector>& gs, double f);
|
||||
|
||||
/** Construct from a JacobianFactor (or from a GaussianConditional since it derives from it) */
|
||||
explicit HessianFactor(const JacobianFactor& cg);
|
||||
|
||||
/** Attempt to construct from any GaussianFactor - currently supports JacobianFactor,
|
||||
* HessianFactor, GaussianConditional, or any derived classes. */
|
||||
explicit HessianFactor(const GaussianFactor& factor);
|
||||
|
||||
/** Combine a set of factors into a single dense HessianFactor */
|
||||
HessianFactor(const GaussianFactorGraph& factors);
|
||||
|
||||
/** Destructor */
|
||||
virtual ~HessianFactor() {}
|
||||
|
||||
/** Clone this HessianFactor */
|
||||
virtual GaussianFactor::shared_ptr clone() const {
|
||||
return boost::make_shared<HessianFactor>(*this); }
|
||||
|
||||
/** Print the factor for debugging and testing (implementing Testable) */
|
||||
virtual void print(const std::string& s = "",
|
||||
const IndexFormatter& formatter = DefaultKeyFormatter) const;
|
||||
|
||||
/** Compare to another factor for testing (implementing Testable) */
|
||||
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const;
|
||||
|
||||
/** Evaluate the factor error f(x), see above. */
|
||||
virtual double error(const VectorValues& c) const; ///< 0.5*[x -1]'*H*[x -1] (also see constructor documentation)
|
||||
|
||||
/** Return the dimension of the variable pointed to by the given key iterator
|
||||
* todo: Remove this in favor of keeping track of dimensions with variables?
|
||||
* @param variable An iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
*/
|
||||
virtual size_t getDim(const_iterator variable) const { return info_(variable-this->begin(), 0).rows(); }
|
||||
|
||||
/** Return the number of columns and rows of the Hessian matrix */
|
||||
size_t rows() const { return info_.rows(); }
|
||||
|
||||
/**
|
||||
* Construct the corresponding anti-factor to negate information
|
||||
* stored in this factor.
|
||||
* @return a HessianFactor with negated Hessian matrices
|
||||
*/
|
||||
virtual GaussianFactor::shared_ptr negate() const;
|
||||
|
||||
/** Check if the factor is empty. TODO: How should this be defined? */
|
||||
virtual bool empty() const { return size() == 0 /*|| rows() == 0*/; }
|
||||
|
||||
/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
|
||||
* information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
|
||||
* above to explain that only the upper-triangular part of the information matrix is stored
|
||||
* and returned by this function.
|
||||
* @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return A view of the requested block, not a copy.
|
||||
*/
|
||||
constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); }
|
||||
|
||||
/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
|
||||
* information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
|
||||
* above to explain that only the upper-triangular part of the information matrix is stored
|
||||
* and returned by this function.
|
||||
* @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return A view of the requested block, not a copy.
|
||||
*/
|
||||
Block info(iterator j1, iterator j2) { return info_(j1-begin(), j2-begin()); }
|
||||
|
||||
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
|
||||
* as described above. See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
constBlock info() const { return info_.full(); }
|
||||
|
||||
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
|
||||
* as described above. See HessianFactor class documentation above to explain that only the
|
||||
* upper-triangular part of the information matrix is stored and returned by this function.
|
||||
*/
|
||||
Block info() { return info_.full(); }
|
||||
|
||||
/** Return the constant term \f$ f \f$ as described above
|
||||
* @return The constant term \f$ f \f$
|
||||
*/
|
||||
double constantTerm() const { return info_(this->size(), this->size())(0,0); }
|
||||
|
||||
/** Return the constant term \f$ f \f$ as described above
|
||||
* @return The constant term \f$ f \f$
|
||||
*/
|
||||
double& constantTerm() { return info_(this->size(), this->size())(0,0); }
|
||||
|
||||
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
|
||||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constBlock::ColXpr linearTerm(const_iterator j) const { return info_(j-begin(), size()).col(0); }
|
||||
|
||||
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
|
||||
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
|
||||
* use, for example, begin() + 2 to get the 3rd variable in this factor.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
Block::ColXpr linearTerm(iterator j) { return info_(j-begin(), size()).col(0); }
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
constBlock::ColXpr linearTerm() const { return info_.range(0, this->size(), this->size(), this->size() + 1).col(0); }
|
||||
|
||||
/** Return the complete linear term \f$ g \f$ as described above.
|
||||
* @return The linear term \f$ g \f$ */
|
||||
Block::ColXpr linearTerm() { return info_.range(0, this->size(), this->size(), this->size() + 1).col(0); }
|
||||
|
||||
/** Return the augmented information matrix represented by this GaussianFactor.
|
||||
* The augmented information matrix contains the information matrix with an
|
||||
* additional column holding the information vector, and an additional row
|
||||
* holding the transpose of the information vector. The lower-right entry
|
||||
* contains the constant error term (when \f$ \delta x = 0 \f$). The
|
||||
* augmented information matrix is described in more detail in HessianFactor,
|
||||
* which in fact stores an augmented information matrix.
|
||||
*
|
||||
* For HessianFactor, this is the same as info() except that this function
|
||||
* returns a complete symmetric matrix whereas info() returns a matrix where
|
||||
* only the upper triangle is valid, but should be interpreted as symmetric.
|
||||
* This is because info() returns only a reference to the internal
|
||||
* representation of the augmented information matrix, which stores only the
|
||||
* upper triangle.
|
||||
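*
* Illustrative relationship (assuming a HessianFactor \c factor):
\code
Matrix H = factor.augmentedInformation();  // full symmetric matrix
Matrix U = factor.info();                  // only the upper triangle is valid
// H equals U.selfadjointView<Eigen::Upper>()
\endcode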
*/
|
||||
virtual Matrix augmentedInformation() const;
|
||||
|
||||
/** Return the non-augmented information matrix represented by this
|
||||
* GaussianFactor.
|
||||
*/
|
||||
virtual Matrix information() const;
|
||||
|
||||
/**
|
||||
* Return (dense) matrix associated with factor
|
||||
* @param ordering of variables needed for matrix column order
|
||||
* @param set weight to true to bake in the weights
|
||||
*/
|
||||
virtual std::pair<Matrix, Vector> jacobian() const;
|
||||
|
||||
/**
|
||||
* Return (dense) matrix associated with factor
|
||||
* The returned system is an augmented matrix: [A b]
|
||||
* @param set weight to use whitening to bake in weights
|
||||
*/
|
||||
virtual Matrix augmentedJacobian() const;
|
||||
|
||||
// Friend unit test classes
|
||||
friend class ::HessianFactorConversionConstructorTest;
|
||||
friend class ::HessianFactorConstructor1Test;
|
||||
friend class ::HessianFactorConstructor1bTest;
|
||||
friend class ::HessianFactorcombineTest;
|
||||
|
||||
// used in eliminateCholesky:
|
||||
|
||||
/**
|
||||
* Do Cholesky. Note that after this, the lower triangle still contains
|
||||
* some untouched non-zeros that should be zero. We zero them while
|
||||
* extracting submatrices in splitEliminatedFactor. Frank says :-(
|
||||
*/
|
||||
void partialCholesky(size_t nrFrontals);
|
||||
|
||||
/** Update the factor by adding the information from the JacobianFactor
|
||||
* (used internally during elimination).
|
||||
* @param update The JacobianFactor containing the new information to add
|
||||
* @param scatter A mapping from variable index to slot index in this HessianFactor
|
||||
*/
|
||||
void updateATA(const JacobianFactor& update, const Scatter& scatter);
|
||||
|
||||
/** Update the factor by adding the information from the HessianFactor
|
||||
* (used internally during elimination).
|
||||
* @param update The HessianFactor containing the new information to add
|
||||
* @param scatter A mapping from variable index to slot index in this HessianFactor
|
||||
*/
|
||||
void updateATA(const HessianFactor& update, const Scatter& scatter);
|
||||
|
||||
/** assert invariants */
|
||||
void assertInvariants() const;
|
||||
|
||||
/**
|
||||
* Densely partially eliminate with Cholesky factorization. JacobianFactors
|
||||
* are left-multiplied with their transpose to form the Hessian using the
|
||||
* conversion constructor HessianFactor(const JacobianFactor&).
|
||||
*
|
||||
* If any factors contain constrained noise models, this function will fail
|
||||
* because our current implementation cannot handle constrained noise models
|
||||
* in Cholesky factorization. The function EliminatePreferCholesky()
|
||||
* automatically does QR instead when this is the case.
|
||||
*
|
||||
* Variables are eliminated in the natural order of the variable indices in
|
||||
* the factors.
|
||||
* @param factors Factors to combine and eliminate
|
||||
* @param nrFrontals Number of frontal variables to eliminate.
|
||||
* @return The conditional and remaining factor
|
||||
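*
* Rough usage sketch (assuming a populated GaussianFactorGraph \c factors and an
* Ordering \c keys; boost::tie unpacks the returned pair):
\code
boost::shared_ptr<GaussianConditional> conditional;
boost::shared_ptr<GaussianFactor> remaining;
boost::tie(conditional, remaining) = EliminateCholeskyOrdered(factors, keys);
\endcode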
|
||||
* \addtogroup LinearSolving
|
||||
*/
|
||||
friend GTSAM_EXPORT std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<GaussianFactor> >
|
||||
EliminateCholeskyOrdered(const GaussianFactorGraph& factors, const Ordering& keys);
|
||||
|
||||
private:
|
||||
|
||||
/** split partially eliminated factor */
|
||||
boost::shared_ptr<GaussianConditional> splitEliminatedFactor(size_t nrFrontals);
|
||||
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
void serialize(ARCHIVE & ar, const unsigned int version) {
|
||||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(GaussianFactor);
|
||||
ar & BOOST_SERIALIZATION_NVP(info_);
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
|
@@ -47,19 +47,19 @@ using namespace std;
|
|||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
string SlotEntry::toString() const {
|
||||
string SlotEntryOrdered::toString() const {
|
||||
ostringstream oss;
|
||||
oss << "SlotEntry: slot=" << slot << ", dim=" << dimension;
|
||||
oss << "SlotEntryOrdered: slot=" << slot << ", dim=" << dimension;
|
||||
return oss.str();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Scatter::Scatter(const FactorGraphOrdered<GaussianFactorOrdered>& gfg) {
|
||||
ScatterOrdered::ScatterOrdered(const FactorGraphOrdered<GaussianFactorOrdered>& gfg) {
|
||||
// First do the set union.
|
||||
BOOST_FOREACH(const GaussianFactorOrdered::shared_ptr& factor, gfg) {
|
||||
if(factor) {
|
||||
for(GaussianFactorOrdered::const_iterator variable = factor->begin(); variable != factor->end(); ++variable) {
|
||||
this->insert(make_pair(*variable, SlotEntry(0, factor->getDim(variable))));
|
||||
this->insert(make_pair(*variable, SlotEntryOrdered(0, factor->getDim(variable))));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -270,12 +270,12 @@ HessianFactorOrdered::HessianFactorOrdered(const GaussianFactorOrdered& gf) : in
|
|||
/* ************************************************************************* */
|
||||
HessianFactorOrdered::HessianFactorOrdered(const FactorGraphOrdered<GaussianFactorOrdered>& factors) : info_(matrix_)
|
||||
{
|
||||
Scatter scatter(factors);
|
||||
ScatterOrdered scatter(factors);
|
||||
|
||||
// Pull out keys and dimensions
|
||||
gttic(keys);
|
||||
vector<size_t> dimensions(scatter.size() + 1);
|
||||
BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) {
|
||||
BOOST_FOREACH(const ScatterOrdered::value_type& var_slot, scatter) {
|
||||
dimensions[var_slot.second.slot] = var_slot.second.dimension;
|
||||
}
|
||||
// This is for the r.h.s. vector
|
||||
|
@@ -288,7 +288,7 @@ HessianFactorOrdered::HessianFactorOrdered(const FactorGraphOrdered<GaussianFact
|
|||
info_.resize(dimensions.begin(), dimensions.end(), false);
|
||||
// Fill in keys
|
||||
keys_.resize(scatter.size());
|
||||
std::transform(scatter.begin(), scatter.end(), keys_.begin(), boost::bind(&Scatter::value_type::first, ::_1));
|
||||
std::transform(scatter.begin(), scatter.end(), keys_.begin(), boost::bind(&ScatterOrdered::value_type::first, ::_1));
|
||||
gttoc(allocate);
|
||||
gttic(zero);
|
||||
matrix_.noalias() = Matrix::Zero(matrix_.rows(),matrix_.cols());
|
||||
|
@@ -367,7 +367,7 @@ double HessianFactorOrdered::error(const VectorValuesOrdered& c) const {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void HessianFactorOrdered::updateATA(const HessianFactorOrdered& update, const Scatter& scatter) {
|
||||
void HessianFactorOrdered::updateATA(const HessianFactorOrdered& update, const ScatterOrdered& scatter) {
|
||||
|
||||
// This function updates 'combined' with the information in 'update'.
|
||||
// 'scatter' maps variables in the update factor to slots in the combined
|
||||
|
@@ -420,7 +420,7 @@ void HessianFactorOrdered::updateATA(const HessianFactorOrdered& update, const S
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void HessianFactorOrdered::updateATA(const JacobianFactorOrdered& update, const Scatter& scatter) {
|
||||
void HessianFactorOrdered::updateATA(const JacobianFactorOrdered& update, const ScatterOrdered& scatter) {
|
||||
|
||||
// This function updates 'combined' with the information in 'update'.
|
||||
// 'scatter' maps variables in the update factor to slots in the combined
|
||||
|
|
|
@@ -36,23 +36,23 @@ namespace gtsam {
|
|||
class GaussianConditionalOrdered;
|
||||
template<class C> class BayesNetOrdered;
|
||||
|
||||
// Definition of Scatter, which is an intermediate data structure used when
|
||||
// Definition of ScatterOrdered, which is an intermediate data structure used when
|
||||
// building a HessianFactor incrementally, to get the keys in the right
|
||||
// order.
|
||||
// The "scatter" is a map from global variable indices to slot indices in the
|
||||
// union of involved variables. We also include the dimensionality of the
|
||||
// variable.
|
||||
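// For example (illustrative): two factors involving variable indices {2,5} and {5,7}
// yield the scatter { 2 -> slot 0, 5 -> slot 1, 7 -> slot 2 }, each entry also
// recording that variable's dimension.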
struct GTSAM_EXPORT SlotEntry {
|
||||
struct GTSAM_EXPORT SlotEntryOrdered {
|
||||
size_t slot;
|
||||
size_t dimension;
|
||||
SlotEntry(size_t _slot, size_t _dimension)
|
||||
SlotEntryOrdered(size_t _slot, size_t _dimension)
|
||||
: slot(_slot), dimension(_dimension) {}
|
||||
std::string toString() const;
|
||||
};
|
||||
class Scatter : public FastMap<Index, SlotEntry> {
|
||||
class ScatterOrdered : public FastMap<Index, SlotEntryOrdered> {
|
||||
public:
|
||||
Scatter() {}
|
||||
Scatter(const FactorGraphOrdered<GaussianFactorOrdered>& gfg);
|
||||
ScatterOrdered() {}
|
||||
ScatterOrdered(const FactorGraphOrdered<GaussianFactorOrdered>& gfg);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@@ -350,14 +350,14 @@ namespace gtsam {
|
|||
* @param update The JacobianFactor containing the new information to add
|
||||
* @param scatter A mapping from variable index to slot index in this HessianFactor
|
||||
*/
|
||||
void updateATA(const JacobianFactorOrdered& update, const Scatter& scatter);
|
||||
void updateATA(const JacobianFactorOrdered& update, const ScatterOrdered& scatter);
|
||||
|
||||
/** Update the factor by adding the information from the HessianFactor
|
||||
* (used internally during elimination).
|
||||
* @param update The HessianFactor containing the new information to add
|
||||
* @param scatter A mapping from variable index to slot index in this HessianFactor
|
||||
*/
|
||||
void updateATA(const HessianFactorOrdered& update, const Scatter& scatter);
|
||||
void updateATA(const HessianFactorOrdered& update, const ScatterOrdered& scatter);
|
||||
|
||||
/** assert invariants */
|
||||
void assertInvariants() const;
|
||||
|
|
|
@@ -65,7 +65,7 @@ namespace gtsam {
|
|||
|
||||
/* ************************************************************************* */
|
||||
namespace {
|
||||
DenseIndex _getCols(const std::pair<Key,Matrix>& p) {
|
||||
DenseIndex _getColsJF(const std::pair<Key,Matrix>& p) {
|
||||
return p.second.cols();
|
||||
}
|
||||
}
|
||||
|
@@ -86,9 +86,9 @@ namespace gtsam {
|
|||
// a single '1' to add a dimension for the b vector.
|
||||
{
|
||||
using boost::adaptors::transformed;
|
||||
using boost::join;
|
||||
using boost::assign::cref_list_of;
|
||||
Ab_ = VerticalBlockMatrix(join(terms | transformed(_getCols), cref_list_of<1,DenseIndex>(1)), b.size());
|
||||
namespace br = boost::range;
|
||||
Ab_ = VerticalBlockMatrix(br::join(terms | transformed(&_getColsJF), cref_list_of<1,DenseIndex>(1)), b.size());
|
||||
}
|
||||
|
||||
// Check and add terms
|
||||
|
|
|
@@ -20,7 +20,7 @@
|
|||
#include <gtsam/linear/linearExceptions.h>
|
||||
#include <gtsam/linear/GaussianConditional.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
//#include <gtsam/linear/HessianFactor.h>
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <gtsam/inference/VariableSlots.h>
|
||||
|
@@ -60,15 +60,17 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor() :
|
||||
Ab_(cref_list_of<1>(1), 0)
|
||||
{}
|
||||
{
|
||||
getb().setZero();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const GaussianFactor& gf) {
|
||||
// Copy the matrix data depending on what type of factor we're copying from
|
||||
if(const JacobianFactor* rhs = dynamic_cast<const JacobianFactor*>(&gf))
|
||||
*this = JacobianFactor(*rhs);
|
||||
//else if(const HessianFactor* rhs = dynamic_cast<const HessianFactor*>(&gf))
|
||||
// *this = JacobianFactor(*rhs);
|
||||
else if(const HessianFactor* rhs = dynamic_cast<const HessianFactor*>(&gf))
|
||||
*this = JacobianFactor(*rhs);
|
||||
else
|
||||
throw std::invalid_argument("In JacobianFactor(const GaussianFactor& rhs), rhs is neither a JacobianFactor nor a HessianFactor");
|
||||
}
|
||||
|
@@ -441,24 +443,33 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix,Vector> JacobianFactor::jacobian(bool weight) const {
|
||||
pair<Matrix,Vector> JacobianFactor::jacobian() const {
|
||||
pair<Matrix,Vector> result = jacobianUnweighted();
|
||||
// divide in sigma so error is indeed 0.5*|Ax-b|
|
||||
if (model_)
|
||||
model_->WhitenSystem(result.first, result.second);
|
||||
return result;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix,Vector> JacobianFactor::jacobianUnweighted() const {
|
||||
Matrix A(Ab_.range(0, size()));
|
||||
Vector b(getb());
|
||||
// divide in sigma so error is indeed 0.5*|Ax-b|
|
||||
if (weight && model_)
|
||||
model_->WhitenSystem(A,b);
|
||||
return make_pair(A, b);
|
||||
}
|
||||
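// Illustrative contrast of the two accessors (assuming a JacobianFactor jf):
//   Matrix A; Vector b;
//   boost::tie(A, b) = jf.jacobian();            // whitened, so the error is 0.5*|Ax-b|^2
//   boost::tie(A, b) = jf.jacobianUnweighted();  // raw blocks, noise model not applied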
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedJacobian(bool weight) const {
|
||||
if (weight && model_) {
|
||||
Matrix Ab(Ab_.range(0,Ab_.nBlocks()));
|
||||
Matrix JacobianFactor::augmentedJacobian() const {
|
||||
Matrix Ab = augmentedJacobianUnweighted();
|
||||
if (model_) {
|
||||
model_->WhitenInPlace(Ab);
|
||||
return Ab;
|
||||
} else {
|
||||
return Ab_.range(0, Ab_.nBlocks());
|
||||
}
|
||||
return Ab;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedJacobianUnweighted() const {
|
||||
return Ab_.range(0, Ab_.nBlocks());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
@@ -472,10 +483,10 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
//GaussianFactor::shared_ptr JacobianFactor::negate() const {
|
||||
// HessianFactor hessian(*this);
|
||||
// return hessian.negate();
|
||||
//}
|
||||
GaussianFactor::shared_ptr JacobianFactor::negate() const {
|
||||
HessianFactor hessian(*this);
|
||||
return hessian.negate();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<JacobianFactor> >
|
||||
|
|
|
@@ -88,8 +88,8 @@ namespace gtsam {
|
|||
public:
|
||||
typedef VerticalBlockMatrix::Block ABlock;
|
||||
typedef VerticalBlockMatrix::constBlock constABlock;
|
||||
typedef VerticalBlockMatrix::Column BVector;
|
||||
typedef VerticalBlockMatrix::constColumn constBVector;
|
||||
typedef ABlock::ColXpr BVector;
|
||||
typedef constABlock::ConstColXpr constBVector;
|
||||
|
||||
/** Convert from other GaussianFactor */
|
||||
explicit JacobianFactor(const GaussianFactor& gf);
|
||||
|
@@ -178,26 +178,37 @@ namespace gtsam {
|
|||
* @param ordering of variables needed for matrix column order
|
||||
* @param set weight to true to bake in the weights
|
||||
*/
|
||||
virtual std::pair<Matrix, Vector> jacobian(bool weight = true) const;
|
||||
virtual std::pair<Matrix, Vector> jacobian() const;
|
||||
|
||||
/**
|
||||
* Return (dense) matrix associated with factor
|
||||
* @param ordering of variables needed for matrix column order
|
||||
* @param set weight to true to bake in the weights
|
||||
*/
|
||||
std::pair<Matrix, Vector> jacobianUnweighted() const;
|
||||
|
||||
/**
|
||||
* Return (dense) matrix associated with factor
|
||||
* The returned system is an augmented matrix: [A b]
|
||||
* @param set weight to use whitening to bake in weights
|
||||
*/
|
||||
virtual Matrix augmentedJacobian(bool weight = true) const;
|
||||
virtual Matrix augmentedJacobian() const;
|
||||
|
||||
/**
|
||||
* Return (dense) matrix associated with factor
|
||||
* The returned system is an augmented matrix: [A b]
|
||||
* @param set weight to use whitening to bake in weights
|
||||
*/
|
||||
Matrix augmentedJacobianUnweighted() const;
|
||||
|
||||
/**
|
||||
* Construct the corresponding anti-factor to negate information
|
||||
* stored in this factor.
|
||||
* @return a HessianFactor with negated Hessian matrices
|
||||
*/
|
||||
//virtual GaussianFactor::shared_ptr negate() const;
|
||||
virtual GaussianFactor::shared_ptr negate() const;
|
||||
|
||||
/** Check if the factor contains no information, i.e. zero rows. This does
|
||||
* not necessarily mean that the factor involves no variables (to check for
|
||||
* involving no variables use keys().empty()).
|
||||
*/
|
||||
/** Check if the factor is empty. TODO: How should this be defined? */
|
||||
virtual bool empty() const { return size() == 0 /*|| rows() == 0*/; }
|
||||
|
||||
/** is noise model constrained ? */
|
||||
|
@@ -284,7 +295,7 @@ namespace gtsam {
|
|||
* factor to be the remaining component. Performs same operation as eliminate(),
|
||||
* but without running QR.
|
||||
*/
|
||||
boost::shared_ptr<GaussianConditional> splitConditional(size_t nrFrontals = 1);
|
||||
boost::shared_ptr<GaussianConditional> splitConditional(size_t nrFrontals);
|
||||
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
|
|
|
@@ -250,8 +250,8 @@ namespace gtsam {
|
|||
VectorValues operator*(const double a, const VectorValues &v)
|
||||
{
|
||||
VectorValues result;
|
||||
BOOST_FOREACH(const VectorValues::KeyValuePair& v, v)
|
||||
result.values_.insert(result.values_.end(), make_pair(v.first, a * v.second));
|
||||
BOOST_FOREACH(const VectorValues::KeyValuePair& key_v, v)
|
||||
result.values_.insert(result.values_.end(), make_pair(key_v.first, a * key_v.second));
|
||||
return result;
|
||||
}
|
||||
|
||||
|
|
|
@@ -197,11 +197,11 @@ namespace gtsam {
|
|||
|
||||
iterator begin() { return values_.begin(); } ///< Iterator over variables
|
||||
const_iterator begin() const { return values_.begin(); } ///< Iterator over variables
|
||||
iterator end() { return values_.end(); } ///< Iterator over variables
|
||||
iterator end() { return values_.end(); } ///< Iterator over variables
|
||||
const_iterator end() const { return values_.end(); } ///< Iterator over variables
|
||||
reverse_iterator rbegin() { return values_.rbegin(); } ///< Reverse iterator over variables
|
||||
reverse_iterator rbegin() { return values_.rbegin(); } ///< Reverse iterator over variables
|
||||
const_reverse_iterator rbegin() const { return values_.rbegin(); } ///< Reverse iterator over variables
|
||||
reverse_iterator rend() { return values_.rend(); } ///< Reverse iterator over variables
|
||||
reverse_iterator rend() { return values_.rend(); } ///< Reverse iterator over variables
|
||||
const_reverse_iterator rend() const { return values_.rend(); } ///< Reverse iterator over variables
|
||||
|
||||
/** Return the iterator corresponding to the requested key, or end() if no variable is present with this key. */
|
||||
|
|
|
@@ -0,0 +1,638 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testCholeskyFactor.cpp
|
||||
* @author Richard Roberts
|
||||
* @date Dec 15, 2010
|
||||
*/
|
||||
|
||||
#include <vector>
|
||||
#include <utility>
|
||||
|
||||
#include <boost/assign/std/vector.hpp>
|
||||
#include <boost/assign/std/map.hpp>
|
||||
|
||||
#include <gtsam/base/debug.h>
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace boost::assign;
|
||||
using namespace gtsam;
|
||||
|
||||
const double tol = 1e-5;
|
||||
|
||||
#if 0
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactor, emptyConstructor) {
|
||||
HessianFactor f;
|
||||
DOUBLES_EQUAL(0.0, f.constantTerm(), 1e-9); // Constant term should be zero
|
||||
EXPECT(assert_equal(Vector(), f.linearTerm())); // Linear term should be empty
|
||||
EXPECT(assert_equal(zeros(1,1), f.info())); // Full matrix should be 1-by-1 zero matrix
|
||||
DOUBLES_EQUAL(0.0, f.error(VectorValuesOrdered()), 1e-9); // Should have zero error
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactor, ConversionConstructor) {
|
||||
|
||||
HessianFactorOrdered expected;
|
||||
expected.keys_.push_back(0);
|
||||
expected.keys_.push_back(1);
|
||||
size_t dims[] = { 2, 4, 1 };
|
||||
expected.info_.resize(dims, dims+3, false);
|
||||
expected.matrix_ = Matrix_(7,7,
|
||||
125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000,
|
||||
0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000,
|
||||
-25.0000, 0.0, 25.0000, 0.0, 0.0, 0.0, -5.0000,
|
||||
0.0, -25.0000, 0.0, 25.0000, 0.0, 0.0, 7.5000,
|
||||
-100.0000, 0.0, 0.0, 0.0, 100.0000, 0.0, -20.0000,
|
||||
0.0, -100.0000, 0.0, 0.0, 0.0, 100.0000, 10.0000,
|
||||
25.0000, -17.5000, -5.0000, 7.5000, -20.0000, 10.0000, 8.2500);
|
||||
|
||||
// sigmas
|
||||
double sigma1 = 0.2;
|
||||
double sigma2 = 0.1;
|
||||
Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
|
||||
|
||||
// the combined linear factor
|
||||
Matrix Ax2 = Matrix_(4,2,
|
||||
// x2
|
||||
-1., 0.,
|
||||
+0.,-1.,
|
||||
1., 0.,
|
||||
+0.,1.
|
||||
);
|
||||
|
||||
Matrix Al1x1 = Matrix_(4,4,
|
||||
// l1 x1
|
||||
1., 0., 0.00, 0., // f4
|
||||
0., 1., 0.00, 0., // f4
|
||||
0., 0., -1., 0., // f2
|
||||
0., 0., 0.00,-1. // f2
|
||||
);
|
||||
|
||||
// the RHS
|
||||
Vector b2(4);
|
||||
b2(0) = -0.2;
|
||||
b2(1) = 0.3;
|
||||
b2(2) = 0.2;
|
||||
b2(3) = -0.1;
|
||||
|
||||
vector<pair<Index, Matrix> > meas;
|
||||
meas.push_back(make_pair(0, Ax2));
|
||||
meas.push_back(make_pair(1, Al1x1));
|
||||
JacobianFactorOrdered combined(meas, b2, noiseModel::Diagonal::Sigmas(sigmas));
|
||||
|
||||
HessianFactorOrdered actual(combined);
|
||||
|
||||
VectorValuesOrdered values(std::vector<size_t>(dims, dims+2));
|
||||
values[0] = Vector_(2, 1.0, 2.0);
|
||||
values[1] = Vector_(4, 3.0, 4.0, 5.0, 6.0);
|
||||
|
||||
EXPECT_LONGS_EQUAL(2, actual.size());
|
||||
|
||||
EXPECT(assert_equal(expected, actual, 1e-9));
|
||||
|
||||
// error terms
|
||||
EXPECT_DOUBLES_EQUAL(combined.error(values), actual.error(values), 1e-9);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, Constructor1)
|
||||
{
|
||||
Matrix G = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
|
||||
Vector g = Vector_(2, -8.0, -9.0);
|
||||
double f = 10.0;
|
||||
|
||||
Vector dxv = Vector_(2, 1.5, 2.5);
|
||||
|
||||
vector<size_t> dims;
|
||||
dims.push_back(2);
|
||||
VectorValuesOrdered dx(dims);
|
||||
|
||||
dx[0] = dxv;
|
||||
|
||||
HessianFactorOrdered factor(0, G, g, f);
|
||||
|
||||
// extract underlying parts
|
||||
Matrix info_matrix = factor.info_.range(0, 1, 0, 1);
|
||||
EXPECT(assert_equal(Matrix(G), info_matrix));
|
||||
EXPECT_DOUBLES_EQUAL(f, factor.constantTerm(), 1e-10);
|
||||
EXPECT(assert_equal(g, Vector(factor.linearTerm()), 1e-10));
|
||||
EXPECT_LONGS_EQUAL(1, factor.size());
|
||||
|
||||
// error 0.5*(f - 2*x'*g + x'*G*x)
|
||||
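// with G = [3 5; 5 6] (self-adjoint view of the upper triangle), g = [-8,-9], f = 10
// and dx = [1.5,2.5]: x'Gx = 81.75, x'g = -34.5, so 0.5*(10 + 69 + 81.75) = 80.375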
double expected = 80.375;
|
||||
double actual = factor.error(dx);
|
||||
double expected_manual = 0.5 * (f - 2.0 * dxv.dot(g) + dxv.transpose() * G.selfadjointView<Eigen::Upper>() * dxv);
|
||||
EXPECT_DOUBLES_EQUAL(expected, expected_manual, 1e-10);
|
||||
DOUBLES_EQUAL(expected, actual, 1e-10);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, Constructor1b)
|
||||
{
|
||||
Vector mu = Vector_(2,1.0,2.0);
|
||||
Matrix Sigma = eye(2,2);
|
||||
|
||||
HessianFactorOrdered factor(0, mu, Sigma);
|
||||
|
||||
Matrix G = eye(2,2);
|
||||
Vector g = G*mu;
|
||||
double f = dot(g,mu);
|
||||
|
||||
// Check
|
||||
Matrix info_matrix = factor.info_.range(0, 1, 0, 1);
|
||||
EXPECT(assert_equal(Matrix(G), info_matrix));
|
||||
EXPECT_DOUBLES_EQUAL(f, factor.constantTerm(), 1e-10);
|
||||
EXPECT(assert_equal(g, Vector(factor.linearTerm()), 1e-10));
|
||||
EXPECT_LONGS_EQUAL(1, factor.size());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, Constructor2)
|
||||
{
|
||||
Matrix G11 = Matrix_(1,1, 1.0);
|
||||
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
|
||||
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
|
||||
Vector g1 = Vector_(1, -7.0);
|
||||
Vector g2 = Vector_(2, -8.0, -9.0);
|
||||
double f = 10.0;
|
||||
|
||||
Vector dx0 = Vector_(1, 0.5);
|
||||
Vector dx1 = Vector_(2, 1.5, 2.5);
|
||||
|
||||
vector<size_t> dims;
|
||||
dims.push_back(1);
|
||||
dims.push_back(2);
|
||||
VectorValuesOrdered dx(dims);
|
||||
|
||||
dx[0] = dx0;
|
||||
dx[1] = dx1;
|
||||
|
||||
HessianFactorOrdered factor(0, 1, G11, G12, g1, G22, g2, f);
|
||||
|
||||
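// with the full symmetric G = [1 2 4; 2 3 5; 4 5 6], g = [-7,-8,-9] and x = [0.5,1.5,2.5]:
// x'Gx = 95 and x'g = -38, so 0.5*(10 + 76 + 95) = 90.5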
double expected = 90.5;
|
||||
double actual = factor.error(dx);
|
||||
|
||||
DOUBLES_EQUAL(expected, actual, 1e-10);
|
||||
LONGS_EQUAL(4, factor.rows());
|
||||
DOUBLES_EQUAL(10.0, factor.constantTerm(), 1e-10);
|
||||
|
||||
Vector linearExpected(3); linearExpected << g1, g2;
|
||||
EXPECT(assert_equal(linearExpected, factor.linearTerm()));
|
||||
|
||||
EXPECT(assert_equal(G11, factor.info(factor.begin(), factor.begin())));
|
||||
EXPECT(assert_equal(G12, factor.info(factor.begin(), factor.begin()+1)));
|
||||
EXPECT(assert_equal(G22, factor.info(factor.begin()+1, factor.begin()+1)));
|
||||
|
||||
// Check case when vector values is larger than factor
|
||||
dims.push_back(2);
|
||||
VectorValuesOrdered dxLarge(dims);
|
||||
dxLarge[0] = dx0;
|
||||
dxLarge[1] = dx1;
|
||||
dxLarge[2] = Vector_(2, 0.1, 0.2);
|
||||
EXPECT_DOUBLES_EQUAL(expected, factor.error(dxLarge), 1e-10);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, Constructor3)
|
||||
{
|
||||
Matrix G11 = Matrix_(1,1, 1.0);
|
||||
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
|
||||
Matrix G13 = Matrix_(1,3, 3.0, 6.0, 9.0);
|
||||
|
||||
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
|
||||
Matrix G23 = Matrix_(2,3, 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
|
||||
|
||||
Matrix G33 = Matrix_(3,3, 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
|
||||
|
||||
Vector g1 = Vector_(1, -7.0);
|
||||
Vector g2 = Vector_(2, -8.0, -9.0);
|
||||
Vector g3 = Vector_(3, 1.0, 2.0, 3.0);
|
||||
|
||||
double f = 10.0;
|
||||
|
||||
Vector dx0 = Vector_(1, 0.5);
|
||||
Vector dx1 = Vector_(2, 1.5, 2.5);
|
||||
Vector dx2 = Vector_(3, 1.5, 2.5, 3.5);
|
||||
|
||||
vector<size_t> dims;
|
||||
dims.push_back(1);
|
||||
dims.push_back(2);
|
||||
dims.push_back(3);
|
||||
VectorValuesOrdered dx(dims);
|
||||
|
||||
dx[0] = dx0;
|
||||
dx[1] = dx1;
|
||||
dx[2] = dx2;
|
||||
|
||||
HessianFactorOrdered factor(0, 1, 2, G11, G12, G13, g1, G22, G23, g2, G33, g3, f);
|
||||
|
||||
double expected = 371.3750;
|
||||
double actual = factor.error(dx);
|
||||
|
||||
DOUBLES_EQUAL(expected, actual, 1e-10);
|
||||
LONGS_EQUAL(7, factor.rows());
|
||||
DOUBLES_EQUAL(10.0, factor.constantTerm(), 1e-10);
|
||||
|
||||
Vector linearExpected(6); linearExpected << g1, g2, g3;
|
||||
EXPECT(assert_equal(linearExpected, factor.linearTerm()));
|
||||
|
||||
EXPECT(assert_equal(G11, factor.info(factor.begin()+0, factor.begin()+0)));
|
||||
EXPECT(assert_equal(G12, factor.info(factor.begin()+0, factor.begin()+1)));
|
||||
EXPECT(assert_equal(G13, factor.info(factor.begin()+0, factor.begin()+2)));
|
||||
EXPECT(assert_equal(G22, factor.info(factor.begin()+1, factor.begin()+1)));
|
||||
EXPECT(assert_equal(G23, factor.info(factor.begin()+1, factor.begin()+2)));
|
||||
EXPECT(assert_equal(G33, factor.info(factor.begin()+2, factor.begin()+2)));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, ConstructorNWay)
|
||||
{
|
||||
Matrix G11 = Matrix_(1,1, 1.0);
|
||||
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
|
||||
Matrix G13 = Matrix_(1,3, 3.0, 6.0, 9.0);
|
||||
|
||||
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
|
||||
Matrix G23 = Matrix_(2,3, 4.0, 6.0, 8.0, 1.0, 2.0, 4.0);
|
||||
|
||||
Matrix G33 = Matrix_(3,3, 1.0, 2.0, 3.0, 0.0, 5.0, 6.0, 0.0, 0.0, 9.0);
|
||||
|
||||
Vector g1 = Vector_(1, -7.0);
|
||||
Vector g2 = Vector_(2, -8.0, -9.0);
|
||||
Vector g3 = Vector_(3, 1.0, 2.0, 3.0);
|
||||
|
||||
double f = 10.0;
|
||||
|
||||
Vector dx0 = Vector_(1, 0.5);
|
||||
Vector dx1 = Vector_(2, 1.5, 2.5);
|
||||
Vector dx2 = Vector_(3, 1.5, 2.5, 3.5);
|
||||
|
||||
vector<size_t> dims;
|
||||
dims.push_back(1);
|
||||
dims.push_back(2);
|
||||
dims.push_back(3);
|
||||
VectorValuesOrdered dx(dims);
|
||||
|
||||
dx[0] = dx0;
|
||||
dx[1] = dx1;
|
||||
dx[2] = dx2;
|
||||
|
||||
|
||||
std::vector<Index> js;
|
||||
js.push_back(0); js.push_back(1); js.push_back(2);
|
||||
std::vector<Matrix> Gs;
|
||||
Gs.push_back(G11); Gs.push_back(G12); Gs.push_back(G13); Gs.push_back(G22); Gs.push_back(G23); Gs.push_back(G33);
|
||||
std::vector<Vector> gs;
|
||||
gs.push_back(g1); gs.push_back(g2); gs.push_back(g3);
|
||||
HessianFactorOrdered factor(js, Gs, gs, f);
|
||||
|
||||
double expected = 371.3750;
|
||||
double actual = factor.error(dx);
|
||||
|
||||
DOUBLES_EQUAL(expected, actual, 1e-10);
|
||||
LONGS_EQUAL(7, factor.rows());
|
||||
DOUBLES_EQUAL(10.0, factor.constantTerm(), 1e-10);
|
||||
|
||||
Vector linearExpected(6); linearExpected << g1, g2, g3;
|
||||
EXPECT(assert_equal(linearExpected, factor.linearTerm()));
|
||||
|
||||
EXPECT(assert_equal(G11, factor.info(factor.begin()+0, factor.begin()+0)));
|
||||
EXPECT(assert_equal(G12, factor.info(factor.begin()+0, factor.begin()+1)));
|
||||
EXPECT(assert_equal(G13, factor.info(factor.begin()+0, factor.begin()+2)));
|
||||
EXPECT(assert_equal(G22, factor.info(factor.begin()+1, factor.begin()+1)));
|
||||
EXPECT(assert_equal(G23, factor.info(factor.begin()+1, factor.begin()+2)));
|
||||
EXPECT(assert_equal(G33, factor.info(factor.begin()+2, factor.begin()+2)));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE(HessianFactorOrdered, CopyConstructor_and_assignment)
|
||||
{
|
||||
Matrix G11 = Matrix_(1,1, 1.0);
|
||||
Matrix G12 = Matrix_(1,2, 2.0, 4.0);
|
||||
Matrix G22 = Matrix_(2,2, 3.0, 5.0, 0.0, 6.0);
|
||||
Vector g1 = Vector_(1, -7.0);
|
||||
Vector g2 = Vector_(2, -8.0, -9.0);
|
||||
double f = 10.0;
|
||||
|
||||
Vector dx0 = Vector_(1, 0.5);
|
||||
Vector dx1 = Vector_(2, 1.5, 2.5);
|
||||
|
||||
vector<size_t> dims;
|
||||
dims.push_back(1);
|
||||
dims.push_back(2);
|
||||
VectorValuesOrdered dx(dims);
|
||||
|
||||
dx[0] = dx0;
|
||||
dx[1] = dx1;
|
||||
|
||||
HessianFactorOrdered originalFactor(0, 1, G11, G12, g1, G22, g2, f);
|
||||
|
||||
// Make a copy
|
||||
HessianFactorOrdered copy1(originalFactor);
|
||||
|
||||
double expected = 90.5;
|
||||
double actual = copy1.error(dx);
|
||||
|
||||
DOUBLES_EQUAL(expected, actual, 1e-10);
|
||||
LONGS_EQUAL(4, copy1.rows());
|
||||
DOUBLES_EQUAL(10.0, copy1.constantTerm(), 1e-10);
|
||||
|
||||
Vector linearExpected(3); linearExpected << g1, g2;
|
||||
EXPECT(assert_equal(linearExpected, copy1.linearTerm()));
|
||||
|
||||
EXPECT(assert_equal(G11, copy1.info(copy1.begin(), copy1.begin())));
|
||||
EXPECT(assert_equal(G12, copy1.info(copy1.begin(), copy1.begin()+1)));
|
||||
EXPECT(assert_equal(G22, copy1.info(copy1.begin()+1, copy1.begin()+1)));
|
||||
|
||||
// Make a copy using the assignment operator
|
||||
HessianFactorOrdered copy2;
|
||||
copy2 = HessianFactorOrdered(originalFactor); // Make a temporary to make sure copying does not share references
|
||||
|
||||
actual = copy2.error(dx);
|
||||
DOUBLES_EQUAL(expected, actual, 1e-10);
|
||||
LONGS_EQUAL(4, copy2.rows());
|
||||
DOUBLES_EQUAL(10.0, copy2.constantTerm(), 1e-10);
|
||||
|
||||
EXPECT(assert_equal(linearExpected, copy2.linearTerm()));
|
||||
|
||||
EXPECT(assert_equal(G11, copy2.info(copy2.begin(), copy2.begin())));
|
||||
EXPECT(assert_equal(G12, copy2.info(copy2.begin(), copy2.begin()+1)));
|
||||
EXPECT(assert_equal(G22, copy2.info(copy2.begin()+1, copy2.begin()+1)));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE(HessianFactorOrdered, CombineAndEliminate)
|
||||
{
|
||||
Matrix A01 = Matrix_(3,3,
|
||||
1.0, 0.0, 0.0,
|
||||
0.0, 1.0, 0.0,
|
||||
0.0, 0.0, 1.0);
|
||||
Vector b0 = Vector_(3, 1.5, 1.5, 1.5);
|
||||
Vector s0 = Vector_(3, 1.6, 1.6, 1.6);
|
||||
|
||||
Matrix A10 = Matrix_(3,3,
|
||||
2.0, 0.0, 0.0,
|
||||
0.0, 2.0, 0.0,
|
||||
0.0, 0.0, 2.0);
|
||||
Matrix A11 = Matrix_(3,3,
|
||||
-2.0, 0.0, 0.0,
|
||||
0.0, -2.0, 0.0,
|
||||
0.0, 0.0, -2.0);
|
||||
Vector b1 = Vector_(3, 2.5, 2.5, 2.5);
|
||||
Vector s1 = Vector_(3, 2.6, 2.6, 2.6);
|
||||
|
||||
Matrix A21 = Matrix_(3,3,
|
||||
3.0, 0.0, 0.0,
|
||||
0.0, 3.0, 0.0,
|
||||
0.0, 0.0, 3.0);
|
||||
Vector b2 = Vector_(3, 3.5, 3.5, 3.5);
|
||||
Vector s2 = Vector_(3, 3.6, 3.6, 3.6);
|
||||
|
||||
GaussianFactorGraphOrdered gfg;
|
||||
gfg.add(1, A01, b0, noiseModel::Diagonal::Sigmas(s0, true));
|
||||
gfg.add(0, A10, 1, A11, b1, noiseModel::Diagonal::Sigmas(s1, true));
|
||||
gfg.add(1, A21, b2, noiseModel::Diagonal::Sigmas(s2, true));
|
||||
|
||||
Matrix zero3x3 = zeros(3,3);
|
||||
Matrix A0 = gtsam::stack(3, &A10, &zero3x3, &zero3x3);
|
||||
Matrix A1 = gtsam::stack(3, &A11, &A01, &A21);
|
||||
Vector b = gtsam::concatVectors(3, &b1, &b0, &b2);
|
||||
Vector sigmas = gtsam::concatVectors(3, &s1, &s0, &s2);
|
||||
|
||||
// create a full, uneliminated version of the factor
|
||||
JacobianFactorOrdered expectedFactor(0, A0, 1, A1, b, noiseModel::Diagonal::Sigmas(sigmas, true));
|
||||
|
||||
// perform elimination
|
||||
GaussianConditionalOrdered::shared_ptr expectedBN = expectedFactor.eliminate(1);
|
||||
|
||||
// create expected Hessian after elimination
|
||||
HessianFactorOrdered expectedCholeskyFactor(expectedFactor);
|
||||
|
||||
// Convert all factors to hessians
|
||||
FactorGraphOrdered<HessianFactorOrdered> hessians;
|
||||
BOOST_FOREACH(const GaussianFactorGraphOrdered::sharedFactor& factor, gfg) {
|
||||
if(boost::shared_ptr<HessianFactorOrdered> hf = boost::dynamic_pointer_cast<HessianFactorOrdered>(factor))
|
||||
hessians.push_back(hf);
|
||||
else if(boost::shared_ptr<JacobianFactorOrdered> jf = boost::dynamic_pointer_cast<JacobianFactorOrdered>(factor))
|
||||
hessians.push_back(boost::make_shared<HessianFactorOrdered>(*jf));
|
||||
else
|
||||
CHECK(false);
|
||||
}
|
||||
|
||||
// Eliminate
|
||||
GaussianFactorGraphOrdered::EliminationResult actualCholesky = EliminateCholeskyOrdered(gfg, 1);
|
||||
HessianFactorOrdered::shared_ptr actualFactor = boost::dynamic_pointer_cast<HessianFactorOrdered>(actualCholesky.second);
|
||||
|
||||
EXPECT(assert_equal(*expectedBN, *actualCholesky.first, 1e-6));
|
||||
EXPECT(assert_equal(expectedCholeskyFactor, *actualFactor, 1e-6));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, eliminate2 )
|
||||
{
|
||||
// sigmas
|
||||
double sigma1 = 0.2;
|
||||
double sigma2 = 0.1;
|
||||
Vector sigmas = Vector_(4, sigma1, sigma1, sigma2, sigma2);
|
||||
|
||||
// the combined linear factor
|
||||
Matrix Ax2 = Matrix_(4,2,
|
||||
// x2
|
||||
-1., 0.,
|
||||
+0.,-1.,
|
||||
1., 0.,
|
||||
+0.,1.
|
||||
);
|
||||
|
||||
Matrix Al1x1 = Matrix_(4,4,
|
||||
// l1 x1
|
||||
1., 0., 0.00, 0., // f4
|
||||
0., 1., 0.00, 0., // f4
|
||||
0., 0., -1., 0., // f2
|
||||
0., 0., 0.00,-1. // f2
|
||||
);
|
||||
|
||||
// the RHS
|
||||
Vector b2(4);
|
||||
b2(0) = -0.2;
|
||||
b2(1) = 0.3;
|
||||
b2(2) = 0.2;
|
||||
b2(3) = -0.1;
|
||||
|
||||
vector<pair<Index, Matrix> > meas;
|
||||
meas.push_back(make_pair(0, Ax2));
|
||||
meas.push_back(make_pair(1, Al1x1));
|
||||
JacobianFactorOrdered combined(meas, b2, noiseModel::Diagonal::Sigmas(sigmas));
|
||||
|
||||
// eliminate the combined factor
|
||||
HessianFactorOrdered::shared_ptr combinedLF_Chol(new HessianFactorOrdered(combined));
|
||||
FactorGraphOrdered<HessianFactorOrdered> combinedLFG_Chol;
|
||||
combinedLFG_Chol.push_back(combinedLF_Chol);
|
||||
|
||||
GaussianFactorGraphOrdered::EliminationResult actual_Chol = EliminateCholeskyOrdered(
|
||||
combinedLFG_Chol, 1);
|
||||
HessianFactorOrdered::shared_ptr actualFactor = boost::dynamic_pointer_cast<
|
||||
HessianFactorOrdered>(actual_Chol.second);
|
||||
|
||||
// create expected Conditional Gaussian
|
||||
double oldSigma = 0.0894427; // from when R was made unit
|
||||
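// (0.0894427 = 1/sqrt(125): x2 accumulates information 1/0.2^2 + 1/0.1^2 = 125 per dimension)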
Matrix R11 = Matrix_(2,2,
|
||||
1.00, 0.00,
|
||||
0.00, 1.00
|
||||
)/oldSigma;
|
||||
Matrix S12 = Matrix_(2,4,
|
||||
-0.20, 0.00,-0.80, 0.00,
|
||||
+0.00,-0.20,+0.00,-0.80
|
||||
)/oldSigma;
|
||||
Vector d = Vector_(2,0.2,-0.14)/oldSigma;
|
||||
GaussianConditionalOrdered expectedCG(0,d,R11,1,S12,ones(2));
|
||||
EXPECT(assert_equal(expectedCG,*actual_Chol.first,1e-4));
|
||||
|
||||
// the expected linear factor
|
||||
double sigma = 0.2236;
|
||||
Matrix Bl1x1 = Matrix_(2,4,
|
||||
// l1 x1
|
||||
1.00, 0.00, -1.00, 0.00,
|
||||
0.00, 1.00, +0.00, -1.00
|
||||
)/sigma;
|
||||
Vector b1 = Vector_(2,0.0,0.894427);
|
||||
JacobianFactorOrdered expectedLF(1, Bl1x1, b1, noiseModel::Isotropic::Sigma(2,1.0));
|
||||
EXPECT(assert_equal(HessianFactorOrdered(expectedLF), *actualFactor, 1.5e-3));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, eliminateUnsorted) {
|
||||
|
||||
JacobianFactorOrdered::shared_ptr factor1(
|
||||
new JacobianFactorOrdered(0,
|
||||
Matrix_(3,3,
|
||||
44.7214, 0.0, 0.0,
|
||||
0.0, 44.7214, 0.0,
|
||||
0.0, 0.0, 44.7214),
|
||||
1,
|
||||
Matrix_(3,3,
|
||||
-0.179168, -44.721, 0.717294,
|
||||
44.721, -0.179168, -44.9138,
|
||||
0.0, 0.0, -44.7214),
|
||||
Vector_(3, 1.98916e-17, -4.96503e-15, -7.75792e-17),
|
||||
noiseModel::Unit::Create(3)));
|
||||
HessianFactorOrdered::shared_ptr unsorted_factor2(
|
||||
new HessianFactorOrdered(JacobianFactorOrdered(0,
|
||||
Matrix_(6,3,
|
||||
25.8367, 0.1211, 0.0593,
|
||||
0.0, 23.4099, 30.8733,
|
||||
0.0, 0.0, 25.8729,
|
||||
0.0, 0.0, 0.0,
|
||||
0.0, 0.0, 0.0,
|
||||
0.0, 0.0, 0.0),
|
||||
1,
|
||||
Matrix_(6,3,
|
||||
25.7429, -1.6897, 0.4587,
|
||||
1.6400, 23.3095, -8.4816,
|
||||
0.0034, 0.0509, -25.7855,
|
||||
0.9997, -0.0002, 0.0824,
|
||||
0.0, 0.9973, 0.9517,
|
||||
0.0, 0.0, 0.9973),
|
||||
Vector_(6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
|
||||
noiseModel::Unit::Create(6))));
|
||||
Permutation permutation(2);
|
||||
permutation[0] = 1;
|
||||
permutation[1] = 0;
|
||||
unsorted_factor2->permuteWithInverse(permutation);
|
||||
|
||||
HessianFactorOrdered::shared_ptr sorted_factor2(
|
||||
new HessianFactorOrdered(JacobianFactorOrdered(0,
|
||||
Matrix_(6,3,
|
||||
25.7429, -1.6897, 0.4587,
|
||||
1.6400, 23.3095, -8.4816,
|
||||
0.0034, 0.0509, -25.7855,
|
||||
0.9997, -0.0002, 0.0824,
|
||||
0.0, 0.9973, 0.9517,
|
||||
0.0, 0.0, 0.9973),
|
||||
1,
|
||||
Matrix_(6,3,
|
||||
25.8367, 0.1211, 0.0593,
|
||||
0.0, 23.4099, 30.8733,
|
||||
0.0, 0.0, 25.8729,
|
||||
0.0, 0.0, 0.0,
|
||||
0.0, 0.0, 0.0,
|
||||
0.0, 0.0, 0.0),
|
||||
Vector_(6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
|
||||
noiseModel::Unit::Create(6))));
|
||||
|
||||
GaussianFactorGraphOrdered sortedGraph;
|
||||
// sortedGraph.push_back(factor1);
|
||||
sortedGraph.push_back(sorted_factor2);
|
||||
|
||||
GaussianFactorGraphOrdered unsortedGraph;
|
||||
// unsortedGraph.push_back(factor1);
|
||||
unsortedGraph.push_back(unsorted_factor2);
|
||||
|
||||
GaussianConditionalOrdered::shared_ptr expected_bn;
|
||||
GaussianFactorOrdered::shared_ptr expected_factor;
|
||||
boost::tie(expected_bn, expected_factor) =
|
||||
EliminatePreferCholeskyOrdered(sortedGraph, 1);
|
||||
|
||||
GaussianConditionalOrdered::shared_ptr actual_bn;
|
||||
GaussianFactorOrdered::shared_ptr actual_factor;
|
||||
boost::tie(actual_bn, actual_factor) =
|
||||
EliminatePreferCholeskyOrdered(unsortedGraph, 1);
|
||||
|
||||
EXPECT(assert_equal(*expected_bn, *actual_bn, 1e-10));
|
||||
EXPECT(assert_equal(*expected_factor, *actual_factor, 1e-10));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(HessianFactorOrdered, combine) {
|
||||
|
||||
// update the information matrix with a single jacobian factor
|
||||
Matrix A0 = Matrix_(2, 2,
|
||||
11.1803399, 0.0,
|
||||
0.0, 11.1803399);
|
||||
Matrix A1 = Matrix_(2, 2,
|
||||
-2.23606798, 0.0,
|
||||
0.0, -2.23606798);
|
||||
Matrix A2 = Matrix_(2, 2,
|
||||
-8.94427191, 0.0,
|
||||
0.0, -8.94427191);
|
||||
Vector b = Vector_(2, 2.23606798,-1.56524758);
|
||||
SharedDiagonal model = noiseModel::Diagonal::Sigmas(ones(2));
|
||||
GaussianFactorOrdered::shared_ptr f(new JacobianFactorOrdered(0, A0, 1, A1, 2, A2, b, model));
|
||||
FactorGraphOrdered<GaussianFactorOrdered> factors;
|
||||
factors.push_back(f);
|
||||
|
||||
// Form Ab' * Ab
|
||||
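// (all sigmas are 1, so whitening is a no-op and the result is exactly [A0 A1 A2 b]' * [A0 A1 A2 b])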
HessianFactorOrdered actual(factors);
|
||||
|
||||
Matrix expected = Matrix_(7, 7,
|
||||
125.0000, 0.0, -25.0000, 0.0, -100.0000, 0.0, 25.0000,
|
||||
0.0, 125.0000, 0.0, -25.0000, 0.0, -100.0000, -17.5000,
|
||||
-25.0000, 0.0, 5.0000, 0.0, 20.0000, 0.0, -5.0000,
|
||||
0.0, -25.0000, 0.0, 5.0000, 0.0, 20.0000, 3.5000,
|
||||
-100.0000, 0.0, 20.0000, 0.0, 80.0000, 0.0, -20.0000,
|
||||
0.0, -100.0000, 0.0, 20.0000, 0.0, 80.0000, 14.0000,
|
||||
25.0000, -17.5000, -5.0000, 3.5000, -20.0000, 14.0000, 7.4500);
|
||||
EXPECT(assert_equal(Matrix(expected.triangularView<Eigen::Upper>()),
|
||||
Matrix(actual.matrix_.triangularView<Eigen::Upper>()), tol));
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
|
||||
/* ************************************************************************* */
|
|
@@ -16,6 +16,8 @@
|
|||
* @date Feb 26, 2012
|
||||
*/
|
||||
|
||||
#if 0
|
||||
|
||||
#include <gtsam/nonlinear/DoglegOptimizer.h>
|
||||
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
|
||||
#include <gtsam/linear/GaussianBayesTree.h>
|
||||
|
@@ -95,3 +97,5 @@ DoglegParams DoglegOptimizer::ensureHasOrdering(DoglegParams params, const Nonli
|
|||
}
|
||||
|
||||
} /* namespace gtsam */
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -18,6 +18,8 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#if 0
|
||||
|
||||
#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
@@ -162,3 +164,5 @@ protected:
|
|||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -61,7 +61,7 @@ public:
|
|||
*/
|
||||
GaussNewtonOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
|
||||
const GaussNewtonParams& params = GaussNewtonParams()) :
|
||||
NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)), state_(graph, initialValues) {}
|
||||
NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph)), state_(graph, initialValues) {}
|
||||
|
||||
/** Standard constructor, requires a nonlinear factor graph, initial
|
||||
* variable assignments, and optimization parameters. For convenience this
|
||||
|
|
|
@@ -16,6 +16,8 @@
|
|||
* @author Richard Roberts
|
||||
*/
|
||||
|
||||
#if 0
|
||||
|
||||
#include <gtsam/nonlinear/ISAM2-impl.h>
|
||||
#include <gtsam/nonlinear/Symbol.h> // for selective linearization thresholds
|
||||
#include <gtsam/base/debug.h>
|
||||
|
@@ -493,3 +495,5 @@ size_t ISAM2::Impl::UpdateDoglegDeltas(const ISAM2& isam, double wildfireThresho
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -17,6 +17,8 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#if 0
|
||||
|
||||
#include <gtsam/linear/GaussianBayesTreeOrdered.h>
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
|
||||
|
@@ -154,3 +156,5 @@ struct GTSAM_EXPORT ISAM2::Impl {
|
|||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -15,6 +15,8 @@
|
|||
* @author Michael Kaess, Richard Roberts
|
||||
*/
|
||||
|
||||
#if 0
|
||||
|
||||
#include <boost/foreach.hpp>
|
||||
#include <boost/assign/std/list.hpp> // for operator +=
|
||||
using namespace boost::assign;
|
||||
|
@@ -1150,3 +1152,5 @@ void gradientAtZero(const ISAM2& bayesTree, VectorValuesOrdered& g) {
|
|||
|
||||
}
|
||||
/// namespace gtsam
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -19,6 +19,8 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#if 0
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
|
||||
#include <gtsam/linear/GaussianBayesTreeOrdered.h>
|
||||
|
@@ -729,3 +731,5 @@ GTSAM_EXPORT void gradientAtZero(const ISAM2& bayesTree, VectorValuesOrdered& g)
|
|||
|
||||
#include <gtsam/nonlinear/ISAM2-inl.h>
|
||||
#include <gtsam/nonlinear/ISAM2-impl.h>
|
||||
|
||||
#endif
|
|
@@ -16,6 +16,8 @@
|
|||
* @date Feb 26, 2012
|
||||
*/
|
||||
|
||||
#if 0
|
||||
|
||||
#include <cmath>
|
||||
|
||||
#include <gtsam/linear/linearExceptions.h>
|
||||
|
@@ -179,3 +181,6 @@ LevenbergMarquardtParams LevenbergMarquardtOptimizer::ensureHasOrdering(
|
|||
}
|
||||
|
||||
} /* namespace gtsam */
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -18,6 +18,8 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#if 0
|
||||
|
||||
#include <gtsam/nonlinear/SuccessiveLinearizationOptimizer.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
@@ -112,7 +114,7 @@ public:
|
|||
*/
|
||||
LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues,
|
||||
const LevenbergMarquardtParams& params = LevenbergMarquardtParams()) :
|
||||
NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph, initialValues)),
|
||||
NonlinearOptimizer(graph), params_(ensureHasOrdering(params, graph)),
|
||||
state_(graph, initialValues, params_), dimensions_(initialValues.dims(*params_.ordering)) {}
|
||||
|
||||
/** Standard constructor, requires a nonlinear factor graph, initial
|
||||
|
@@ -122,7 +124,7 @@ public:
|
|||
* @param graph The nonlinear factor graph to optimize
|
||||
* @param initialValues The initial variable assignments
|
||||
*/
|
||||
LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const OrderingOrdered& ordering) :
|
||||
LevenbergMarquardtOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, const Ordering& ordering) :
|
||||
NonlinearOptimizer(graph), dimensions_(initialValues.dims(ordering)) {
|
||||
params_.ordering = ordering;
|
||||
state_ = LevenbergMarquardtState(graph, initialValues, params_); }
|
||||
|
@ -176,3 +178,5 @@ protected:
|
|||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -7,10 +7,13 @@

#include <gtsam/nonlinear/LinearContainerFactor.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/VectorValues.h>

#include <boost/foreach.hpp>

+#if 0

namespace gtsam {

/* ************************************************************************* */

@@ -55,18 +58,6 @@ LinearContainerFactor::LinearContainerFactor(
initializeLinearizationPoint(linearizationPoint);
}

-/* ************************************************************************* */
-LinearContainerFactor::LinearContainerFactor(
-const GaussianFactorOrdered::shared_ptr& factor,
-const Values& linearizationPoint)
-: factor_(factor->clone())
-{
-// Extract keys stashed in linear factor
-BOOST_FOREACH(const Index& idx, factor_->keys())
-keys_.push_back(idx);
-initializeLinearizationPoint(linearizationPoint);
-}
-
/* ************************************************************************* */
void LinearContainerFactor::print(const std::string& s, const KeyFormatter& keyFormatter) const {
Base::print(s+"LinearContainerFactor", keyFormatter);

@@ -137,7 +128,7 @@ GaussianFactor::shared_ptr LinearContainerFactor::linearize(const Values& c) con
HessianFactor::shared_ptr hesFactor = boost::dynamic_pointer_cast<HessianFactor>(linFactor);
size_t dim = hesFactor->linearTerm().size();
Eigen::Block<HessianFactor::Block> Gview = hesFactor->info().block(0, 0, dim, dim);
-Vector deltaVector = delta.asVector();
+Vector deltaVector = delta.vector();
Vector G_delta = Gview.selfadjointView<Eigen::Upper>() * deltaVector;
hesFactor->constantTerm() += deltaVector.dot(G_delta) - 2.0 * deltaVector.dot(hesFactor->linearTerm());
hesFactor->linearTerm() -= G_delta;

@@ -192,3 +183,4 @@ NonlinearFactorGraph LinearContainerFactor::convertLinearGraph(
/* ************************************************************************* */
} // \namespace gtsam

+#endif

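The constantTerm/linearTerm arithmetic in the linearize() hunk above is the usual shift of a quadratic factor to a new linearization point. As a sketch in matrix notation (G is the information block Gview, g the linear term, f the constant term, and \delta the stored delta vector):

    E(x) = \tfrac{1}{2}\bigl( x^\top G x - 2\, x^\top g + f \bigr)
    E(\delta + x') = \tfrac{1}{2}\bigl( x'^\top G x' - 2\, x'^\top (g - G\delta) + f + \delta^\top G \delta - 2\, \delta^\top g \bigr)

so the factor stays consistent when the linear term is replaced by g - G\delta and the constant term grows by \delta^\top G \delta - 2\, \delta^\top g, which is exactly what the two updated statements compute.
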
@@ -11,9 +11,12 @@

#include <gtsam/nonlinear/NonlinearFactorGraph.h>

+#if 0

namespace gtsam {

// Forward declarations
class JacobianFactor;
class HessianFactor;

/**

@@ -45,9 +48,6 @@ public:
/** Constructor from shared_ptr */
LinearContainerFactor(const GaussianFactor::shared_ptr& factor, const Values& linearizationPoint = Values());
-
-/** Constructor from re-keyed factor: all indices assumed replaced with Key */
-LinearContainerFactor(const GaussianFactor::shared_ptr& factor, const Values& linearizationPoint = Values());

// Access

const GaussianFactor::shared_ptr& factor() const { return factor_; }

@@ -129,10 +129,10 @@ public:
bool isHessian() const;

/** Casts to JacobianFactor */
-JacobianFactor::shared_ptr toJacobian() const;
+boost::shared_ptr<JacobianFactor> toJacobian() const;

/** Casts to HessianFactor */
-HessianFactor::shared_ptr toHessian() const;
+boost::shared_ptr<HessianFactor> toHessian() const;

/**
* Utility function for converting linear graphs to nonlinear graphs

@@ -160,4 +160,4 @@ private:

} // \namespace gtsam

+#endif

@@ -16,6 +16,8 @@
* @date May 14, 2012
*/

+#if 0

#include <gtsam/base/timing.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianMultifrontalSolver.h>

@@ -175,3 +177,5 @@ void JointMarginal::print(const std::string& s, const KeyFormatter& formatter) c
}

} /* namespace gtsam */

+#endif

@@ -18,6 +18,8 @@

#pragma once

+#if 0

#include <gtsam/base/blockMatrices.h>
#include <gtsam/linear/GaussianBayesTreeOrdered.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

@@ -141,3 +143,5 @@ protected:
};

} /* namespace gtsam */

+#endif

@@ -5,6 +5,8 @@
* @date Jun 11, 2012
*/

+#if 0

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>
#include <gtsam/nonlinear/OrderingOrdered.h>
#include <gtsam/nonlinear/Values.h>

@@ -62,3 +64,5 @@ const Values& NonlinearConjugateGradientOptimizer::optimize() {
}

} /* namespace gtsam */

+#endif

@@ -7,6 +7,8 @@

#pragma once

+#if 0

#include <gtsam/base/Manifold.h>
#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <boost/tuple/tuple.hpp>

@@ -185,9 +187,6 @@ boost::tuple<V, size_t> nonlinearConjugateGradient(const S &system, const V &ini
return boost::tie(currentValues, iteration);
}

}

+#endif

@@ -305,7 +305,7 @@ public:
this->noiseModel_->WhitenSystem(A,b);

-std::vector<std::pair<Index, Matrix> > terms(this->size());
+std::vector<std::pair<Key, Matrix> > terms(this->size());
// Fill in terms
for(size_t j=0; j<this->size(); ++j) {
terms[j].first = this->keys()[j];

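With the terms now keyed by Key instead of Index, the whitened blocks can be handed straight to a JacobianFactor. A hedged sketch of the surrounding pattern (the variables A, b and model are assumed from context, and the templated terms constructor is an assumption about the unordered JacobianFactor API):

    // Sketch: assemble the whitened (Key, Matrix) terms into a JacobianFactor.
    std::vector<std::pair<Key, Matrix> > terms(this->size());
    for (size_t j = 0; j < this->size(); ++j) {
      terms[j].first = this->keys()[j];   // Key of the j-th variable
      terms[j].second = A[j];             // whitened Jacobian block A_j
    }
    return GaussianFactor::shared_ptr(new JacobianFactor(terms, b, model));
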
@@ -62,7 +62,7 @@ namespace gtsam {
* tangent vector space at the linearization point. Because the tangent space is a true
* vector space, the config type will be an VectorValues in that linearized factor graph.
*/
-class NonlinearFactorGraph: public FactorGraph<NonlinearFactor> {
+class GTSAM_EXPORT NonlinearFactorGraph: public FactorGraph<NonlinearFactor> {

public:

@@ -86,26 +86,26 @@ namespace gtsam {
NonlinearFactorGraph(const FactorGraph<DERIVEDFACTOR>& graph) : Base(graph) {}

/** print just calls base class */
-GTSAM_EXPORT void print(const std::string& str = "NonlinearFactorGraph: ", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
+void print(const std::string& str = "NonlinearFactorGraph: ", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;

/** Write the graph in GraphViz format for visualization */
-GTSAM_EXPORT void saveGraph(std::ostream& stm, const Values& values = Values(),
+void saveGraph(std::ostream& stm, const Values& values = Values(),
const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(),
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;

/** return keys as an ordered set - ordering is by key value */
-GTSAM_EXPORT FastSet<Key> keys() const;
+FastSet<Key> keys() const;

/** unnormalized error, \f$ 0.5 \sum_i (h_i(X_i)-z)^2/\sigma^2 \f$ in the most common case */
-GTSAM_EXPORT double error(const Values& c) const;
+double error(const Values& c) const;

/** Unnormalized probability. O(n) */
-GTSAM_EXPORT double probPrime(const Values& c) const;
+double probPrime(const Values& c) const;

/**
* Create a symbolic factor graph using an existing ordering
*/
-//GTSAM_EXPORT SymbolicFactorGraph::shared_ptr symbolic() const;
+//SymbolicFactorGraph::shared_ptr symbolic() const;

/**
* Create a symbolic factor graph and initial variable ordering that can

@@ -113,13 +113,13 @@ namespace gtsam {
* The graph and ordering should be permuted after such a fill-reducing
* ordering is found.
*/
-//GTSAM_EXPORT std::pair<SymbolicFactorGraph::shared_ptr, Ordering::shared_ptr>
+//std::pair<SymbolicFactorGraph::shared_ptr, Ordering::shared_ptr>
// symbolic(const Values& config) const;

/**
* Compute a fill-reducing ordering using COLAMD.
*/
-GTSAM_EXPORT Ordering orderingCOLAMD() const;
+Ordering orderingCOLAMD() const;

/**
* Compute a fill-reducing ordering with constraints using CCOLAMD

@@ -129,17 +129,17 @@ namespace gtsam {
* indices need to appear in the constraints, unconstrained is assumed for all
* other variables
*/
-GTSAM_EXPORT Ordering orderingCOLAMDConstrained(const FastMap<Key, int>& constraints) const;
+Ordering orderingCOLAMDConstrained(const FastMap<Key, int>& constraints) const;

/**
* linearize a nonlinear factor graph
*/
-GTSAM_EXPORT boost::shared_ptr<GaussianFactorGraph> linearize(const Values& linearizationPoint) const;
+boost::shared_ptr<GaussianFactorGraph> linearize(const Values& linearizationPoint) const;

/**
* Clone() performs a deep-copy of the graph, including all of the factors
*/
-GTSAM_EXPORT NonlinearFactorGraph clone() const;
+NonlinearFactorGraph clone() const;

/**
* Rekey() performs a deep-copy of all of the factors, and changes

@@ -150,7 +150,7 @@ namespace gtsam {
* @param rekey_mapping is a map of old->new keys
* @result a cloned graph with updated keys
*/
-GTSAM_EXPORT NonlinearFactorGraph rekey(const std::map<Key,Key>& rekey_mapping) const;
+NonlinearFactorGraph rekey(const std::map<Key,Key>& rekey_mapping) const;

private:

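The GTSAM_EXPORT changes above follow a single pattern: the export macro moves from individual members to the class declaration, after which per-method GTSAM_EXPORT is redundant because every member of an exported class is exported. A minimal illustration of the two styles (condensed, not literal code from this header):

    // Before: selected members are exported one by one.
    class NonlinearFactorGraph : public FactorGraph<NonlinearFactor> {
    public:
      GTSAM_EXPORT double error(const Values& c) const;
    };

    // After: the whole class carries the export attribute.
    class GTSAM_EXPORT NonlinearFactorGraph : public FactorGraph<NonlinearFactor> {
    public:
      double error(const Values& c) const;
    };
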
@@ -15,6 +15,8 @@
* @author Viorela Ila and Richard Roberts
*/

+#if 0

#include <gtsam/nonlinear/NonlinearISAM.h>

#include <gtsam/linear/GaussianFactorGraphOrdered.h>

@@ -122,3 +124,5 @@ void NonlinearISAM::printStats() const {
/* ************************************************************************* */

}///\ namespace gtsam

+#endif

@@ -15,6 +15,8 @@
* @author Viorela Ila and Richard Roberts
*/

+#if 0

#pragma once

#include <gtsam/nonlinear/NonlinearFactorGraph.h>

@@ -116,3 +118,4 @@ public:

} // \namespace gtsam

+#endif

@@ -63,14 +63,16 @@ VectorValues solveGaussianFactorGraph(const GaussianFactorGraph &gfg, const Succ
delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction())->optimize();
}
else if ( params.isCG() ) {
-if ( !params.iterativeParams ) throw std::runtime_error("solveGaussianFactorGraph: cg parameter has to be assigned ...");
-if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams) ) {
-SubgraphSolver solver (gfg, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams));
-delta = solver.optimize();
-}
-else {
-throw std::runtime_error("solveGaussianFactorGraph: special cg parameter type is not handled in LM solver ...");
-}
+throw std::runtime_error("Not implemented");
+
+//if ( !params.iterativeParams ) throw std::runtime_error("solveGaussianFactorGraph: cg parameter has to be assigned ...");
+//if ( boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams) ) {
+// SubgraphSolver solver (gfg, *boost::dynamic_pointer_cast<SubgraphSolverParameters>(params.iterativeParams));
+// delta = solver.optimize();
+//}
+//else {
+// throw std::runtime_error("solveGaussianFactorGraph: special cg parameter type is not handled in LM solver ...");
+//}
}
else {
throw std::runtime_error("solveGaussianFactorGraph: Optimization parameter is invalid");

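For context, a compact sketch of how the direct branches of solveGaussianFactorGraph use the elimination function selected by the parameters. Only the sequential call is taken verbatim from the hunk above; the multifrontal line and the is*() predicates on the parameter struct are assumptions for illustration:

    // Sketch: pick an elimination strategy from the parameter block.
    VectorValues delta;
    if (params.isSequential())
      delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction())->optimize();
    else if (params.isMultifrontal())
      delta = gfg.eliminateMultifrontal(*params.ordering, params.getEliminationFunction())->optimize();
    else if (params.isCG())
      throw std::runtime_error("Not implemented");
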
@@ -20,10 +20,15 @@

#include <gtsam/nonlinear/NonlinearOptimizer.h>
#include <gtsam/linear/SubgraphSolver.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/GaussianConditional.h>
+#include <gtsam/inference/Ordering.h>

namespace gtsam {

// Forward declarations

class GTSAM_EXPORT SuccessiveLinearizationParams : public NonlinearOptimizerParams {
public:
/** See SuccessiveLinearizationParams::linearSolverType */

@@ -55,19 +60,20 @@ public:

virtual void print(const std::string& str) const;

-GaussianFactorGraphOrdered::Eliminate getEliminationFunction() const {
+GaussianFactorGraph::Eliminate getEliminationFunction() const {
switch (linearSolverType) {
case MULTIFRONTAL_CHOLESKY:
case SEQUENTIAL_CHOLESKY:
-return EliminatePreferCholeskyOrdered;
+throw std::runtime_error("Not implemented");
+//return EliminatePreferCholeskyOrdered;

case MULTIFRONTAL_QR:
case SEQUENTIAL_QR:
-return EliminateQROrdered;
+return EliminateQR;

default:
throw std::runtime_error("Nonlinear optimization parameter \"factorization\" is invalid");
-return EliminateQROrdered;
+return EliminateQR;
break;
}
}

@@ -82,7 +82,7 @@ namespace gtsam {
Values result;

for(const_iterator key_value = begin(); key_value != end(); ++key_value) {
-const Vector& singleDelta = delta[ordering[key_value->key]]; // Delta for this value
+const Vector& singleDelta = delta[key_value->key]; // Delta for this value
Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument
Value* retractedValue(key_value->value.retract_(singleDelta)); // Retract
result.values_.insert(key, retractedValue); // Add retracted result directly to result values

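With the Ordering lookup removed, the delta is indexed directly by each value's Key. A small usage sketch under that assumption (the key, the dimensions and the single-argument retract signature are illustrative, reusing the Vector_ helper seen elsewhere in this diff):

    // Sketch: a VectorValues keyed by the same Keys as the Values.
    VectorValues delta;
    delta.insert(1, Vector_(3, 0.1, 0.0, 0.01));   // delta for the variable stored under Key 1
    Values updated = values.retract(delta);        // each value is retracted by delta[key]
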
@@ -5,6 +5,8 @@
* @author Alex Cunningham
*/

+#if 0

#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/nonlinear/summarization.h>
#include <gtsam/nonlinear/LinearContainerFactor.h>

@@ -84,3 +86,4 @@ NonlinearFactorGraph summarizeAsNonlinearContainer(
/* ************************************************************************* */
} // \namespace gtsam

+#endif

@@ -9,6 +9,8 @@

#pragma once

+#if 0

#include <gtsam/dllexport.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

@@ -58,4 +60,4 @@ NonlinearFactorGraph GTSAM_EXPORT summarizeAsNonlinearContainer(

} // \namespace gtsam

+#endif

@@ -179,7 +179,7 @@ pair<NonlinearFactorGraph::shared_ptr, Values::shared_ptr> load2D(

noiseModel::Diagonal::shared_ptr measurementNoise =
noiseModel::Diagonal::Sigmas(Vector_(2, bearing_std, range_std));
-graph->add(BearingRangeFactor<Pose2, Point2>(id1, id2, bearing, range, measurementNoise));
+*graph += BearingRangeFactor<Pose2, Point2>(id1, id2, bearing, range, measurementNoise);

// Insert poses or points if they do not exist yet
if (!initial->exists(id1))

@@ -222,7 +222,7 @@ pair<NonlinearFactorGraph::shared_ptr, Values::shared_ptr> load2D(
}

// Add to graph
-graph->add(BearingRangeFactor<Pose2, Point2>(id1, L(id2), bearing, range, measurementNoise));
+*graph += BearingRangeFactor<Pose2, Point2>(id1, L(id2), bearing, range, measurementNoise);
}
is.ignore(LINESIZE, '\n');
}

@@ -386,7 +386,7 @@ pair<NonlinearFactorGraph::shared_ptr, Values::shared_ptr> load2D_robust(

noiseModel::Diagonal::shared_ptr measurementNoise =
noiseModel::Diagonal::Sigmas(Vector_(2, bearing_std, range_std));
-graph->add(BearingRangeFactor<Pose2, Point2>(id1, id2, bearing, range, measurementNoise));
+*graph += BearingRangeFactor<Pose2, Point2>(id1, id2, bearing, range, measurementNoise);

// Insert poses or points if they do not exist yet
if (!initial->exists(id1))

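The load2D changes swap graph->add(...) for the stream-style *graph += ... insertion that the factor-graph classes support. A tiny usage sketch (the prior/between factors and noise sigmas are illustrative only):

    // Sketch: factors can be appended with operator+= as well as add().
    NonlinearFactorGraph graph;
    noiseModel::Diagonal::shared_ptr noise = noiseModel::Diagonal::Sigmas(Vector_(3, 0.1, 0.1, 0.05));
    graph += PriorFactor<Pose2>(0, Pose2(0.0, 0.0, 0.0), noise);
    graph += BetweenFactor<Pose2>(0, 1, Pose2(1.0, 0.0, 0.0), noise);
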