Separate gradient functions for FactorGraph, BayesNet, BayesTree, and BayesTree with ISAM2 cliques (the last specialized to use the gradient contributions stored in ISAM2 cliques)
parent 2ef911b7e9
commit 6e1136ba20
@ -19,6 +19,7 @@
 #include <boost/foreach.hpp>
 #include <boost/tuple/tuple.hpp>
 
+#include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/linear/GaussianBayesNet.h>
 #include <gtsam/linear/VectorValues.h>
 
@ -213,6 +214,16 @@ double determinant(const GaussianBayesNet& bayesNet) {
   return exp(logDet);
 }
 
+/* ************************************************************************* */
+VectorValues gradient(const GaussianBayesNet& bayesNet, const VectorValues& x0) {
+  return gradient(FactorGraph<JacobianFactor>(bayesNet), x0);
+}
+
+/* ************************************************************************* */
+void gradientAtZero(const GaussianBayesNet& bayesNet, VectorValues& g) {
+  gradientAtZero(FactorGraph<JacobianFactor>(bayesNet), g);
+}
+
 /* ************************************************************************* */
 
 } // namespace gtsam
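Note: the two new free functions delegate to the FactorGraph<JacobianFactor> versions by converting the Bayes net (R,d) back into Jacobian factors. A minimal usage sketch, not part of the commit; the allocateVectorValues helper is assumed from the Doxygen references elsewhere in this patch:

  // Sketch: evaluate the gradient of the Bayes net energy at x0 and at zero.
  #include <gtsam/linear/GaussianBayesNet.h>
  #include <gtsam/linear/VectorValues.h>
  using namespace gtsam;

  void gradientExample(const GaussianBayesNet& bn, const VectorValues& x0) {
    VectorValues gx = gradient(bn, x0);           // R^T (R*x0 - d)
    VectorValues g0 = *allocateVectorValues(bn);  // preallocate to match bn's structure (assumed helper)
    gradientAtZero(bn, g0);                       // -R^T d, written in place
  }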
@ -64,19 +64,19 @@ namespace gtsam {
  */
 boost::shared_ptr<VectorValues> optimize_(const GaussianBayesNet& bn);
 
-/*
+/**
  * Backsubstitute
  * (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas)
  * @param y is the RHS of the system
  */
 VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y);
 
-/*
+/**
  * Backsubstitute in place, y starts as RHS and is replaced with solution
  */
 void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y);
 
-/*
+/**
  * Transpose Backsubstitute
  * gy=inv(L)*gx by solving L*gy=gx.
  * gy=inv(R'*inv(Sigma))*gx
@ -108,4 +108,26 @@ namespace gtsam {
  */
 double determinant(const GaussianBayesNet& bayesNet);
 
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around \f$ x = x_0 \f$.
+ * The gradient is \f$ R^T(Rx-d) \f$.
+ * @param bayesNet The Gaussian Bayes net $(R,d)$
+ * @param x0 The center about which to compute the gradient
+ * @return The gradient as a VectorValues
+ */
+VectorValues gradient(const GaussianBayesNet& bayesNet, const VectorValues& x0);
+
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around zero.
+ * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
+ * @param bayesNet The Gaussian Bayes net $(R,d)$
+ * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
+ */
+void gradientAtZero(const GaussianBayesNet& bayesNet, VectorValues& g);
+
 } // namespace gtsam
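Note: the documented formulas follow from differentiating the quadratic energy (with the \Sigma^{-1} whitening folded into R and d); a one-line derivation added for clarity, not part of the commit:

  \nabla_x \, \tfrac12 \lVert R x - d \rVert^2 = R^\top (R x - d),
  \qquad
  \nabla_x \, \tfrac12 \lVert R x - d \rVert^2 \Big|_{x=0} = -R^\top d .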
@ -582,17 +582,28 @@ namespace gtsam {
 }
 
 /* ************************************************************************* */
-VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x) {
+VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x0) {
   // It is crucial for performance to make a zero-valued clone of x
-  VectorValues g = VectorValues::Zero(x);
+  VectorValues g = VectorValues::Zero(x0);
   Errors e;
   BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
-    e.push_back(factor->error_vector(x));
+    e.push_back(factor->error_vector(x0));
   }
   transposeMultiplyAdd(fg, 1.0, e, g);
   return g;
 }
 
+/* ************************************************************************* */
+void gradientAtZero(const FactorGraph<JacobianFactor>& fg, VectorValues& g) {
+  // Zero-out the gradient
+  g.setZero();
+  Errors e;
+  BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
+    e.push_back(-factor->getb());
+  }
+  transposeMultiplyAdd(fg, 1.0, e, g);
+}
+
 /* ************************************************************************* */
 void residual(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r) {
   Index i = 0 ;
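Note: both functions follow the same pattern -- build the stacked error vector factor by factor, then apply A^T once via transposeMultiplyAdd. A standalone sketch of that pattern in plain Eigen (illustrative names, dense single-block factors; not GTSAM API):

  #include <Eigen/Dense>
  #include <vector>

  // Gradient of 0.5*||A*x - b||^2 accumulated per factor, mirroring
  // gradient()/gradientAtZero() above: at x0 the per-factor error is
  // A_i*x0 - b_i; at zero it is simply -b_i.
  Eigen::VectorXd gradientSketch(const std::vector<Eigen::MatrixXd>& A,
                                 const std::vector<Eigen::VectorXd>& b,
                                 const Eigen::VectorXd& x0) {
    Eigen::VectorXd g = Eigen::VectorXd::Zero(x0.size());
    for (std::size_t i = 0; i < A.size(); ++i) {
      const Eigen::VectorXd e = A[i] * x0 - b[i];  // per-factor error vector
      g += A[i].transpose() * e;                   // transposeMultiplyAdd with alpha = 1
    }
    return g;  // equals A^T (A*x0 - b) for the stacked system
  }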
@ -318,11 +318,26 @@ namespace gtsam {
 void transposeMultiplyAdd(const FactorGraph<JacobianFactor>& fg, double alpha, const Errors& e, VectorValues& x);
 
 /**
- * Calculate Gradient of A^T(A*x-b) for a given config
- * @param x: VectorValues specifying where to calculate gradient
- * @return gradient, as a VectorValues as well
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
+ * centered around \f$ x = x_0 \f$.
+ * The gradient is \f$ A^T(Ax-b) \f$.
+ * @param fg The Jacobian factor graph $(A,b)$
+ * @param x0 The center about which to compute the gradient
+ * @return The gradient as a VectorValues
  */
-VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x);
+VectorValues gradient(const FactorGraph<JacobianFactor>& fg, const VectorValues& x0);
 
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
+ * centered around zero.
+ * The gradient about zero is \f$ -A^T b \f$.
+ * @param fg The Jacobian factor graph $(A,b)$
+ * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
+ */
+void gradientAtZero(const FactorGraph<JacobianFactor>& fg, VectorValues& g);
+
 // matrix-vector operations
 void residual(const FactorGraph<JacobianFactor>& fg, const VectorValues &x, VectorValues &r);
@ -233,14 +233,14 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
 template<class M>
 VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) {
 
-  // Compute gradient
-  // Convert to JacobianFactor's to use existing gradient function
-  FactorGraph<JacobianFactor> jfg(Rd);
-  VectorValues grad = gradient(jfg, VectorValues::Zero(*allocateVectorValues(Rd)));
+  // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
+  VectorValues grad = *allocateVectorValues(Rd);
+  gradientAtZero(Rd, grad);
   double gradientSqNorm = grad.dot(grad);
 
   // Compute R * g
-  Errors Rg = jfg * grad;
+  FactorGraph<JacobianFactor> Rd_jfg(Rd);
+  Errors Rg = Rd_jfg * grad;
 
   // Compute minimizing step size
   double step = -gradientSqNorm / dot(Rg, Rg);
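Note: the step size in this hunk is the closed-form minimizer of the quadratic model along the gradient ray. A short derivation for the record (added for clarity, not from the commit), writing g for the gradient at zero, g = -R^T d:

  \phi(\alpha) = \tfrac12 \lVert R(\alpha g) - d \rVert^2, \qquad
  \phi'(\alpha) = \alpha \lVert R g \rVert^2 - g^\top R^\top d
               = \alpha \lVert R g \rVert^2 + \lVert g \rVert^2, \qquad
  \alpha^* = -\frac{\lVert g \rVert^2}{\lVert R g \rVert^2},

so the steepest-descent point is x_sd = alpha* g, which is exactly step = -gradientSqNorm / dot(Rg, Rg) followed by the scaling of grad.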
@ -1,10 +1,22 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
 /**
  * @file GaussianISAM2
  * @brief Full non-linear ISAM
  * @author Michael Kaess
  */
 
 #include <gtsam/nonlinear/GaussianISAM2.h>
+#include <gtsam/inference/FactorGraph.h>
+#include <gtsam/linear/JacobianFactor.h>
 
 using namespace std;
 using namespace gtsam;
@ -13,9 +25,9 @@ using namespace gtsam;
 
 namespace gtsam {
 
 /* ************************************************************************* */
 template<class CLIQUE>
 void optimize2(const boost::shared_ptr<CLIQUE>& clique, double threshold,
     vector<bool>& changed, const vector<bool>& replaced, Permuted<VectorValues>& delta, int& count) {
   // if none of the variables in this clique (frontal and separator!) changed
   // significantly, then by the running intersection property, none of the
@ -89,12 +101,12 @@ void optimize2(const boost::shared_ptr<CLIQUE>& clique, double threshold,
     optimize2(child, threshold, changed, replaced, delta, count);
   }
 }
 
 /* ************************************************************************* */
 // fast full version without threshold
 template<class CLIQUE>
 void optimize2(const boost::shared_ptr<CLIQUE>& clique, VectorValues& delta) {
 
   // parents are assumed to already be solved and available in result
   (*clique)->rhs(delta);
@ -104,30 +116,30 @@ void optimize2(const boost::shared_ptr<CLIQUE>& clique, VectorValues& delta) {
   BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
     optimize2(child, delta);
   }
 }
 
 ///* ************************************************************************* */
 //boost::shared_ptr<VectorValues> optimize2(const GaussianISAM2::sharedClique& root) {
 //  boost::shared_ptr<VectorValues> delta(new VectorValues());
 //  set<Symbol> changed;
 //  // starting from the root, call optimize on each conditional
 //  optimize2(root, delta);
 //  return delta;
 //}
 
 /* ************************************************************************* */
 template<class CLIQUE>
 int optimize2(const boost::shared_ptr<CLIQUE>& root, double threshold, const vector<bool>& keys, Permuted<VectorValues>& delta) {
   vector<bool> changed(keys.size(), false);
   int count = 0;
   // starting from the root, call optimize on each conditional
   optimize2(root, threshold, changed, keys, delta, count);
   return count;
 }
 
 /* ************************************************************************* */
 template<class CLIQUE>
 void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {
   int dimR = (*clique)->dim();
   int dimSep = (*clique)->get_S().cols() - dimR;
   result += ((dimR+1)*dimR)/2 + dimSep*dimR;
@ -135,15 +147,15 @@ void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {
   BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
     nnz_internal(child, result);
   }
 }
 
 /* ************************************************************************* */
 template<class CLIQUE>
 int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique) {
   int result = 0;
   // starting from the root, add up entries of frontal and conditional matrices of each conditional
   nnz_internal(clique, result);
   return result;
 }
 
 } // namespace gtsam
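Note: the count accumulated by nnz_internal is the number of entries in a clique's upper-triangular frontal block R plus its dense separator block; as a formula (added for clarity, not from the commit):

  \mathrm{nnz}(\mathrm{clique}) = \frac{d_R (d_R + 1)}{2} + d_R \, d_S ,

where d_R = (*clique)->dim() is the frontal dimension and d_S the separator dimension; calculate_nnz sums this over the whole tree.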
@ -0,0 +1,61 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussianISAM2
+ * @brief Full non-linear ISAM
+ * @author Michael Kaess
+ */
+
+#include <gtsam/nonlinear/GaussianISAM2.h>
+#include <gtsam/inference/FactorGraph.h>
+#include <gtsam/linear/JacobianFactor.h>
+
+#include <boost/foreach.hpp>
+#include <boost/bind.hpp>
+
+using namespace std;
+using namespace gtsam;
+
+namespace gtsam {
+
+/* ************************************************************************* */
+VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0) {
+  return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
+}
+
+/* ************************************************************************* */
+void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g) {
+  gradientAtZero(FactorGraph<JacobianFactor>(bayesTree), g);
+}
+
+/* ************************************************************************* */
+VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0) {
+  return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
+}
+
+/* ************************************************************************* */
+void gradientAtZero(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, VectorValues& g) {
+  // Zero-out gradient
+  g.setZero();
+
+  // Sum up contributions for each clique
+  typedef boost::shared_ptr<ISAM2Clique<GaussianConditional> > sharedClique;
+  BOOST_FOREACH(const sharedClique& clique, bayesTree.nodes()) {
+    // Loop through variables in each clique, adding contributions
+    int variablePosition = 0;
+    for(GaussianConditional::const_iterator jit = clique->conditional()->beginFrontals(); jit != clique->conditional()->endFrontals(); ++jit) {
+      const int dim = clique->conditional()->dim(jit);
+      g[*jit] += clique->gradientContribution().segment(variablePosition, dim);
+      variablePosition += dim;
+    }
+  }
+}
+
+} // namespace gtsam
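Note: the ISAM2-specialized gradientAtZero avoids rebuilding a factor graph by summing each clique's cached gradientContribution(), unpacking the stacked vector into per-variable blocks. A simplified standalone sketch of that unpacking (illustrative types and names, not GTSAM API):

  #include <Eigen/Dense>
  #include <map>
  #include <vector>

  // Add one clique's stacked contribution into the global gradient, keyed by
  // variable index, exactly as the frontal loop above does with segment().
  void addCliqueContribution(const std::vector<int>& frontals,
                             const std::vector<int>& dims,
                             const Eigen::VectorXd& contribution,
                             std::map<int, Eigen::VectorXd>& g) {
    int position = 0;
    for (std::size_t j = 0; j < frontals.size(); ++j) {
      Eigen::VectorXd& gj = g[frontals[j]];
      if (gj.size() == 0) gj = Eigen::VectorXd::Zero(dims[j]);  // preallocate on first touch
      gj += contribution.segment(position, dims[j]);            // this variable's slice
      position += dims[j];
    }
  }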
@ -1,3 +1,14 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
 /**
  * @file GaussianISAM
  * @brief Full non-linear ISAM.
@ -41,26 +52,74 @@ public:
   }
 };
 
-// optimize the BayesTree, starting from the root
+/** optimize the BayesTree, starting from the root */
 template<class CLIQUE>
 void optimize2(const boost::shared_ptr<CLIQUE>& root, VectorValues& delta);
 
-// optimize the BayesTree, starting from the root; "replaced" needs to contain
-// all variables that are contained in the top of the Bayes tree that has been
-// redone; "delta" is the current solution, an offset from the linearization
-// point; "threshold" is the maximum change against the PREVIOUS delta for
-// non-replaced variables that can be ignored, ie. the old delta entry is kept
-// and recursive backsubstitution might eventually stop if none of the changed
-// variables are contained in the subtree.
-// returns the number of variables that were solved for
+/// optimize the BayesTree, starting from the root; "replaced" needs to contain
+/// all variables that are contained in the top of the Bayes tree that has been
+/// redone; "delta" is the current solution, an offset from the linearization
+/// point; "threshold" is the maximum change against the PREVIOUS delta for
+/// non-replaced variables that can be ignored, ie. the old delta entry is kept
+/// and recursive backsubstitution might eventually stop if none of the changed
+/// variables are contained in the subtree.
+/// returns the number of variables that were solved for
 template<class CLIQUE>
 int optimize2(const boost::shared_ptr<CLIQUE>& root,
     double threshold, const std::vector<bool>& replaced, Permuted<VectorValues>& delta);
 
-// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix)
+/// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix)
 template<class CLIQUE>
 int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique);
 
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around \f$ x = x_0 \f$.
+ * The gradient is \f$ R^T(Rx-d) \f$.
+ * @param bayesTree The Gaussian Bayes Tree $(R,d)$
+ * @param x0 The center about which to compute the gradient
+ * @return The gradient as a VectorValues
+ */
+VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0);
+
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around zero.
+ * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
+ * @param bayesTree The Gaussian Bayes Tree $(R,d)$
+ * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
+ */
+void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g);
+
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around \f$ x = x_0 \f$.
+ * The gradient is \f$ R^T(Rx-d) \f$.
+ * This specialized version is used with ISAM2, where each clique stores its
+ * gradient contribution.
+ * @param bayesTree The Gaussian Bayes Tree $(R,d)$
+ * @param x0 The center about which to compute the gradient
+ * @return The gradient as a VectorValues
+ */
+VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0);
+
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around zero.
+ * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
+ * This specialized version is used with ISAM2, where each clique stores its
+ * gradient contribution.
+ * @param bayesTree The Gaussian Bayes Tree $(R,d)$
+ * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
+ */
+void gradientAtZero(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, VectorValues& g);
+
 } // namespace gtsam
 
 #include <gtsam/nonlinear/GaussianISAM2-inl.h>
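Note: taken together, the overloads declared in this header give the same two-call surface for plain Bayes trees and ISAM2 Bayes trees. A hedged usage sketch (not part of the commit; allocateVectorValues on a Bayes tree is assumed, following the Doxygen references above):

  #include <gtsam/nonlinear/GaussianISAM2.h>
  using namespace gtsam;

  typedef BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> > ISAM2Tree;

  void isam2GradientExample(const ISAM2Tree& tree, const VectorValues& x0) {
    VectorValues gx = gradient(tree, x0);           // converts to FactorGraph<JacobianFactor>
    VectorValues g0 = *allocateVectorValues(tree);  // assumed allocation helper
    gradientAtZero(tree, g0);                       // sums cached per-clique contributions
  }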
@ -30,7 +30,8 @@ headers += DoglegOptimizer.h DoglegOptimizer-inl.h
 # Nonlinear iSAM(2)
 headers += NonlinearISAM.h NonlinearISAM-inl.h
 headers += ISAM2.h ISAM2-inl.h ISAM2-impl-inl.h
-headers += GaussianISAM2.h GaussianISAM2-inl.h
+sources += GaussianISAM2.cpp
+headers += GaussianISAM2-inl.h
 
 # Nonlinear constraints
 headers += NonlinearEquality.h