compute gradient wrt a key

parent 0dcb3e209d
commit 5e8c36b3ca

@@ -133,6 +133,9 @@ namespace gtsam {
     /// A'*b for Jacobian, eta for Hessian (raw memory version)
     virtual void gradientAtZero(double* d) const = 0;
 
+    /// Gradient wrt a key at any values
+    virtual Vector gradient(Key key, const VectorValues& x) const = 0;
+
   private:
     /** Serialization function */
     friend class boost::serialization::access;
@@ -622,6 +622,30 @@ void HessianFactor::gradientAtZero(double* d) const {
   }
 }
 
+/* ************************************************************************* */
+Vector HessianFactor::gradient(Key key, const VectorValues& x) const {
+  Factor::const_iterator i = find(key);
+  // Sum over G_ij*xj for all xj connected to xi
+  Vector b = zero(x.at(key).size());
+  for (Factor::const_iterator j = begin(); j != end(); ++j) {
+    // Obtain Gij from the Hessian factor
+    // The Hessian factor only stores the upper-triangular blocks, so transpose when i > j
+    Matrix Gij;
+    if (i > j) {
+      Matrix Gji = info(j, i);
+      Gij = Gji.transpose();
+    }
+    else {
+      Gij = info(i, j);
+    }
+    // Accumulate Gij*xj into the gradient
+    b += Gij * x.at(*j);
+  }
+  // Subtract the linear term g_i
+  b += -linearTerm(i);
+  return b;
+}
+
 /* ************************************************************************* */
 std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
 EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys)
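For context, a minimal usage sketch of the new method (a hypothetical standalone program, not part of this commit; it reuses the factor from the unit test further below and assumes the same Matrix/Vector comma initializers and boost::assign::pair_list_of that those tests use):

#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/VectorValues.h>
#include <boost/assign/list_of.hpp>
#include <iostream>

using namespace gtsam;
using boost::assign::pair_list_of;

int main() {
  // Quadratic factor f(x) = 0.5 x'Gx - g'x + 0.5 f over keys 0 (dim 1) and 1 (dim 2),
  // same numbers as in TEST(HessianFactor, gradient) below
  Matrix G11 = (Matrix(1, 1) << 1);
  Matrix G12 = (Matrix(1, 2) << 0, 0);
  Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1);
  Vector g1 = (Vector(1) << -7);
  Vector g2 = (Vector(2) << -8, -9);
  HessianFactor factor(0, 1, G11, G12, g1, G22, g2, 194.0);

  // Evaluate the gradient block for each key at an arbitrary point x
  Vector x0 = (Vector(1) << 3.0);
  Vector x1 = (Vector(2) << -3.5, 7.1);
  VectorValues x = pair_list_of<Key, Vector>(0, x0)(1, x1);
  std::cout << factor.gradient(0, x) << std::endl;  // prints 10
  std::cout << factor.gradient(1, x) << std::endl;  // prints 4.5 16.1
  return 0;
}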
@@ -389,6 +389,12 @@ namespace gtsam {
 
     virtual void gradientAtZero(double* d) const;
 
+    /**
+     * Compute the gradient at a key:
+     *   \grad f(x_i) = \sum_j G_ij*x_j - g_i
+     */
+    Vector gradient(Key key, const VectorValues& x) const;
+
     /**
      * Densely partially eliminate with Cholesky factorization. JacobianFactors are
      * left-multiplied with their transpose to form the Hessian using the conversion constructor
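For reference, the formula in this doc comment follows from the quadratic error a HessianFactor represents (a sketch of the derivation, assuming the usual GTSAM convention for the augmented system; notation is mine):

\[
f(x) \;=\; \tfrac{1}{2}\, x^\top G\, x \;-\; g^\top x \;+\; \tfrac{1}{2} f_0,
\qquad
\nabla_{x_i} f(x) \;=\; \sum_j G_{ij}\, x_j \;-\; g_i,
\]

where \( G_{ij} = G_{ji}^\top \) supplies the blocks below the diagonal, which is exactly the transpose handling in the implementation above.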
@@ -454,6 +454,32 @@ TEST(HessianFactor, gradientAtZero)
   EXPECT(assert_equal(expectedGscaled, actualGscaled));
 }
 
+/* ************************************************************************* */
+TEST(HessianFactor, gradient)
+{
+  Matrix G11 = (Matrix(1, 1) << 1);
+  Matrix G12 = (Matrix(1, 2) << 0, 0);
+  Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1);
+  Vector g1 = (Vector(1) << -7);
+  Vector g2 = (Vector(2) << -8, -9);
+  double f = 194;
+
+  HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f);
+
+  // test gradient
+  Vector x0 = (Vector(1) << 3.0);
+  Vector x1 = (Vector(2) << -3.5, 7.1);
+  VectorValues x = pair_list_of<Key, Vector>(0, x0) (1, x1);
+
+  Vector expectedGrad0 = (Vector(1) << 10.0);
+  Vector expectedGrad1 = (Vector(2) << 4.5, 16.1);
+  Vector grad0 = factor.gradient(0, x);
+  Vector grad1 = factor.gradient(1, x);
+
+  EXPECT(assert_equal(expectedGrad0, grad0));
+  EXPECT(assert_equal(expectedGrad1, grad1));
+}
+
 /* ************************************************************************* */
 TEST(HessianFactor, hessianDiagonal)
 {
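As an arithmetic check of the expected values in this test (not part of the commit):

\[
\nabla_{x_0} f = G_{11} x_0 + G_{12} x_1 - g_1 = 1 \cdot 3 + 0 - (-7) = 10,
\]
\[
\nabla_{x_1} f = G_{12}^\top x_0 + G_{22} x_1 - g_2 =
\begin{pmatrix} 0 \\ 0 \end{pmatrix} +
\begin{pmatrix} -3.5 \\ 7.1 \end{pmatrix} -
\begin{pmatrix} -8 \\ -9 \end{pmatrix} =
\begin{pmatrix} 4.5 \\ 16.1 \end{pmatrix}.
\]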