Created new dense matrix functions in GaussianFactorGraph returning pair<Matrix,Vector> for easier access, and renamed denseJacobian/denseHessian to augmentedJacobian/augmentedHessian; the dense accessors are now augmentedJacobian, jacobian, augmentedHessian, and hessian.
parent abd07e553e
commit 73f8c0830b
gtsam.h
@@ -1022,8 +1022,10 @@ class GaussianFactorGraph {

   // Conversion to matrices
   Matrix sparseJacobian_() const;
-  Matrix denseJacobian() const;
-  Matrix denseHessian() const;
+  Matrix augmentedJacobian() const;
+  pair<Matrix,Vector> jacobian() const;
+  Matrix augmentedHessian() const;
+  pair<Matrix,Vector> hessian() const;
 };

 class GaussianISAM {
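For orientation, a minimal usage sketch of the renamed interface (a hypothetical snippet, assuming an already-populated GaussianFactorGraph named gfg and the usual gtsam Matrix/Vector typedefs; noise models are baked into all outputs):

    Matrix Ab = gfg.augmentedJacobian();      // [A b]
    Matrix Lc = gfg.augmentedHessian();       // [A b]^T [A b]

    Matrix A; Vector b;
    boost::tie(A, b) = gfg.jacobian();        // A and b split apart

    Matrix Lambda; Vector eta;
    boost::tie(Lambda, eta) = gfg.hessian();  // information matrix and vector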

@@ -166,7 +166,7 @@ namespace gtsam {
   }

   /* ************************************************************************* */
-  Matrix GaussianFactorGraph::denseJacobian() const {
+  Matrix GaussianFactorGraph::augmentedJacobian() const {
     // Convert to Jacobians
     FactorGraph<JacobianFactor> jfg;
     jfg.reserve(this->size());
@@ -182,6 +182,14 @@
     return combined.matrix_augmented();
   }

+  /* ************************************************************************* */
+  std::pair<Matrix,Vector> GaussianFactorGraph::jacobian() const {
+    Matrix augmented = augmentedJacobian();
+    return make_pair(
+      augmented.leftCols(augmented.cols()-1),
+      augmented.col(augmented.cols()-1));
+  }
+
   /* ************************************************************************* */
   // Helper functions for Combine
   static boost::tuple<vector<size_t>, size_t, size_t> countDims(const FactorGraph<JacobianFactor>& factors, const VariableSlots& variableSlots) {
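The split above is plain Eigen block indexing: the last column of the augmented matrix is b, and everything to its left is A. A self-contained sketch of the same slicing on a toy matrix (hypothetical values, not GTSAM code):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Toy augmented system [A b]: m = 2 rows, n = 2 unknowns, last column is b.
      Eigen::MatrixXd augmented(2, 3);
      augmented << 1, 2, 5,
                   3, 4, 6;

      // Same slicing as GaussianFactorGraph::jacobian() above.
      Eigen::MatrixXd A = augmented.leftCols(augmented.cols() - 1);  // [1 2; 3 4]
      Eigen::VectorXd b = augmented.col(augmented.cols() - 1);       // [5; 6]

      std::cout << "A =\n" << A << "\nb = " << b.transpose() << "\n";
      return 0;
    }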
@@ -317,9 +325,7 @@ break;
   }

   /* ************************************************************************* */
-  static
-  FastMap<Index, SlotEntry> findScatterAndDims
-  (const FactorGraph<GaussianFactor>& factors) {
+  static FastMap<Index, SlotEntry> findScatterAndDims(const FactorGraph<GaussianFactor>& factors) {

     const bool debug = ISDEBUG("findScatterAndDims");

@@ -349,7 +355,7 @@ break;
   }

   /* ************************************************************************* */
-  Matrix GaussianFactorGraph::denseHessian() const {
+  Matrix GaussianFactorGraph::augmentedHessian() const {

     Scatter scatter = findScatterAndDims(*this);

@@ -367,6 +373,14 @@ break;
     return result;
   }

+  /* ************************************************************************* */
+  std::pair<Matrix,Vector> GaussianFactorGraph::hessian() const {
+    Matrix augmented = augmentedHessian();
+    return make_pair(
+      augmented.topLeftCorner(augmented.rows()-1, augmented.rows()-1),
+      augmented.col(augmented.rows()-1).head(augmented.rows()-1));
+  }
+
   /* ************************************************************************* */
   GaussianFactorGraph::EliminationResult EliminateCholesky(const FactorGraph<
       GaussianFactor>& factors, size_t nrFrontals) {
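Here the augmented matrix is square of size (n+1)x(n+1), so both the Lambda block and the eta vector are cut relative to rows() rather than cols(). A sketch of the same block extraction on a toy symmetric matrix (hypothetical values, not GTSAM code):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Toy augmented Hessian [Lambda eta; eta^T c] with n = 2 unknowns.
      Eigen::MatrixXd augmented(3, 3);
      augmented << 10, 2, 4,
                    2, 8, 3,
                    4, 3, 5;  // last row/column holds [eta; c]

      const int n = augmented.rows() - 1;
      Eigen::MatrixXd Lambda = augmented.topLeftCorner(n, n);  // 2x2 information matrix
      Eigen::VectorXd eta    = augmented.col(n).head(n);       // information vector [4; 3]

      std::cout << "Lambda =\n" << Lambda << "\neta = " << eta.transpose() << "\n";
      return 0;
    }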

@@ -188,15 +188,43 @@ namespace gtsam {
     Matrix sparseJacobian_() const;

     /**
-     * Return a dense \f$ m \times n \f$ Jacobian matrix, augmented with b
-     * with standard deviations are baked into A and b
+     * Return a dense \f$ [ \;A\;b\; ] \in \mathbb{R}^{m \times (n+1)} \f$
+     * Jacobian matrix, augmented with b, with the noise models baked
+     * into A and b. The negative log-likelihood is
+     * \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
+     * GaussianFactorGraph::jacobian and GaussianFactorGraph::sparseJacobian.
      */
-    Matrix denseJacobian() const;
+    Matrix augmentedJacobian() const;
+
+    /**
+     * Return the dense Jacobian \f$ A \f$ and right-hand-side \f$ b \f$,
+     * with the noise models baked into A and b. The negative log-likelihood
+     * is \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
+     * GaussianFactorGraph::augmentedJacobian and
+     * GaussianFactorGraph::sparseJacobian.
+     */
+    std::pair<Matrix,Vector> jacobian() const;

     /**
-     * Return a dense \f$ n \times n \f$ Hessian matrix, augmented with \f$ A^T b \f$
+     * Return a dense \f$ (n+1) \times (n+1) \f$ Hessian matrix, consisting
+     * of the information matrix \f$ \Lambda \f$ augmented with the
+     * information vector \f$ \eta \f$. The augmented Hessian is
+     \f[ \left[ \begin{array}{cc}
+     \Lambda & \eta \\
+     \eta^T & c
+     \end{array} \right] \f]
+     and the negative log-likelihood is
+     \f$ \frac{1}{2} x^T \Lambda x - \eta^T x + \frac{1}{2} c \f$.
      */
-    Matrix denseHessian() const;
+    Matrix augmentedHessian() const;
+
+    /**
+     * Return the dense Hessian \f$ \Lambda \f$ and information vector
+     * \f$ \eta \f$, with the noise models baked in. The negative
+     * log-likelihood is \f$ \frac{1}{2} x^T \Lambda x - \eta^T x + \frac{1}{2} c \f$.
+     * See also GaussianFactorGraph::augmentedHessian.
+     */
+    std::pair<Matrix,Vector> hessian() const;

   private:
     /** Serialization function */
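The block structure and the sign of the linear term in the comments above follow from squaring the augmented Jacobian. With \(\Lambda = A^T A\), \(\eta = A^T b\), and \(c = b^T b\):

    \[
    [\,A\;b\,]^T [\,A\;b\,]
      = \begin{bmatrix} A^T A & A^T b \\ b^T A & b^T b \end{bmatrix}
      = \begin{bmatrix} \Lambda & \eta \\ \eta^T & c \end{bmatrix},
    \qquad
    \tfrac{1}{2}\Vert Ax - b \Vert^2
      = \tfrac{1}{2} x^T \Lambda x - \eta^T x + \tfrac{1}{2} c.
    \]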
@@ -410,7 +410,7 @@ TEST(GaussianFactor, eliminateFrontals)
   factors.push_back(factor4);

   // extract the dense matrix for the graph
-  Matrix actualDense = factors.denseJacobian();
+  Matrix actualDense = factors.augmentedJacobian();
   EXPECT(assert_equal(2.0 * Ab, actualDense));

   // Convert to Jacobians, inefficient copy of all factors instead of selectively converting only Hessians
@@ -619,7 +619,7 @@ TEST(GaussianFactorGraph, sparseJacobian) {
 }

 /* ************************************************************************* */
-TEST(GaussianFactorGraph, denseHessian) {
+TEST(GaussianFactorGraph, matrices) {
   // Create factor graph:
   //   x1 x2 x3 x4 x5  b
   //    1  2  3  0  0  4
@@ -639,9 +639,24 @@ TEST(GaussianFactorGraph, denseHessian) {
       9,10, 0,11,12,13,
       0, 0, 0,14,15,16;

+  Matrix expectedJacobian = jacobian;
   Matrix expectedHessian = jacobian.transpose() * jacobian;
-  Matrix actualHessian = gfg.denseHessian();
+  Matrix expectedA = jacobian.leftCols(jacobian.cols()-1);
+  Vector expectedb = jacobian.col(jacobian.cols()-1);
+  Matrix expectedL = expectedA.transpose() * expectedA;
+  Vector expectedeta = expectedA.transpose() * expectedb;
+
+  Matrix actualJacobian = gfg.augmentedJacobian();
+  Matrix actualHessian = gfg.augmentedHessian();
+  Matrix actualA; Vector actualb; boost::tie(actualA,actualb) = gfg.jacobian();
+  Matrix actualL; Vector actualeta; boost::tie(actualL,actualeta) = gfg.hessian();
+
+  EXPECT(assert_equal(expectedJacobian, actualJacobian));
   EXPECT(assert_equal(expectedHessian, actualHessian));
+  EXPECT(assert_equal(expectedA, actualA));
+  EXPECT(assert_equal(expectedb, actualb));
+  EXPECT(assert_equal(expectedL, actualL));
+  EXPECT(assert_equal(expectedeta, actualeta));
 }

 /* ************************************************************************* */
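The four accessors are tied together by the identities Lambda = A^T A and eta = A^T b, which is exactly what the expected values above encode. The same invariant could be checked on any graph; a hypothetical sketch in the style of this test file (assumes a populated GaussianFactorGraph gfg):

    Matrix A; Vector b; boost::tie(A, b) = gfg.jacobian();
    Matrix L; Vector eta; boost::tie(L, eta) = gfg.hessian();
    EXPECT(assert_equal(Matrix(A.transpose() * A), L));
    EXPECT(assert_equal(Vector(A.transpose() * b), eta));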
@@ -152,7 +152,7 @@ JointMarginal Marginals::jointMarginalInformation(const std::vector<Key>& variab
   }

   // Get information matrix
-  Matrix augmentedInfo = jointFG.denseHessian();
+  Matrix augmentedInfo = jointFG.augmentedHessian();
   Matrix info = augmentedInfo.topLeftCorner(augmentedInfo.rows()-1, augmentedInfo.cols()-1);

   return JointMarginal(info, dims, variableConversion);
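Given the implementation of hessian() above, the same information matrix could also be pulled out via the new pair accessor; a hedged equivalent sketch, not what the committed code does:

    Matrix info; Vector eta;
    boost::tie(info, eta) = jointFG.hessian();  // info equals the topLeftCorner block above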

@@ -96,7 +96,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPoint) {
   gradientValues.vector() = gradient;

   // Compute the gradient using dense matrices
-  Matrix augmentedHessian = GaussianFactorGraph(gbn).denseHessian();
+  Matrix augmentedHessian = GaussianFactorGraph(gbn).augmentedHessian();
   LONGS_EQUAL(11, augmentedHessian.cols());
   VectorValues denseMatrixGradient = *allocateVectorValues(gbn);
   denseMatrixGradient.vector() = -augmentedHessian.col(10).segment(0,10);
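A note on the last line: for the quadratic (1/2)||Ax - b||^2 the gradient is A^T A x - A^T b = Lambda x - eta, so at x = 0 it equals -eta. With 10 scalar unknowns, -eta is the negated first 10 entries of column 10 of the augmented Hessian, which is exactly what -augmentedHessian.col(10).segment(0,10) extracts.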
@@ -200,7 +200,7 @@ TEST(DoglegOptimizer, BT_BN_equivalency) {
   GaussianFactorGraph expected(gbn);
   GaussianFactorGraph actual(bt);

-  EXPECT(assert_equal(expected.denseHessian(), actual.denseHessian()));
+  EXPECT(assert_equal(expected.augmentedHessian(), actual.augmentedHessian()));
 }

 /* ************************************************************************* */
@@ -276,7 +276,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPointBT) {
   gradientValues.vector() = gradient;

   // Compute the gradient using dense matrices
-  Matrix augmentedHessian = GaussianFactorGraph(bt).denseHessian();
+  Matrix augmentedHessian = GaussianFactorGraph(bt).augmentedHessian();
   LONGS_EQUAL(11, augmentedHessian.cols());
   VectorValues denseMatrixGradient = *allocateVectorValues(bt);
   denseMatrixGradient.vector() = -augmentedHessian.col(10).segment(0,10);