Consistent 'optimize', 'optimizeInPlace', 'optimizeGradientSearch', and 'optimizeGradientSearchInPlace' free functions for GaussianBayesNet (GBN), GaussianBayesTree (GBT), and ISAM2. Reorganized some existing functions and added new ones so the three types share the same interface.

release/4.3a0
Richard Roberts 2012-03-16 16:16:27 +00:00
parent 30557ce4a5
commit e3016baf1b
14 changed files with 338 additions and 141 deletions
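The three types now share the same free-function interface. A minimal usage sketch (illustrative only: it assumes an already-built GaussianBayesNet gbn, GaussianBayesTree gbt, and ISAM2 isam; the solveAll helper is hypothetical, and allocateVectorValues may require additional GTSAM headers):

#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/ISAM2.h>

using namespace gtsam;

// Hypothetical helper showing the now-uniform calling convention added by this commit.
template<class GRAPH>
void solveAll(const GaussianBayesNet& gbn, const GaussianBayesTree& gbt,
              const ISAM2<GaussianConditional, GRAPH>& isam) {
  // Exact solutions by back-substitution
  VectorValues xBn   = optimize(gbn);
  VectorValues xBt   = optimize(gbt);
  VectorValues xIsam = optimize(isam);

  // Steepest-descent points from the closed-form line search along the gradient
  VectorValues gBn   = optimizeGradientSearch(gbn);
  VectorValues gBt   = optimizeGradientSearch(gbt);
  VectorValues gIsam = optimizeGradientSearch(isam);

  // In-place variants reuse pre-allocated storage
  VectorValues buf = *allocateVectorValues(gbn);
  optimizeInPlace(gbn, buf);
  optimizeGradientSearchInPlace(gbn, buf);
}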

View File

@ -122,6 +122,42 @@ VectorValues backSubstituteTranspose(const GaussianBayesNet& bn,
return gy;
}
/* ************************************************************************* */
VectorValues optimizeGradientSearch(const GaussianBayesNet& Rd) {
tic(0, "Allocate VectorValues");
VectorValues grad = *allocateVectorValues(Rd);
toc(0, "Allocate VectorValues");
optimizeGradientSearchInPlace(Rd, grad);
return grad;
}
/* ************************************************************************* */
void optimizeGradientSearchInPlace(const GaussianBayesNet& Rd, VectorValues& grad) {
tic(1, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
gradientAtZero(Rd, grad);
double gradientSqNorm = grad.dot(grad);
toc(1, "Compute Gradient");
tic(2, "Compute R*g");
// Compute R * g
FactorGraph<JacobianFactor> Rd_jfg(Rd);
Errors Rg = Rd_jfg * grad;
toc(2, "Compute R*g");
tic(3, "Compute minimizing step size");
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
toc(3, "Compute minimizing step size");
tic(4, "Compute point");
// Compute steepest descent point
scal(step, grad);
toc(4, "Compute point");
}
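The step computed above is \f$ \delta x = \alpha g \f$ with \f$ \alpha = -g^T g / ((Rg)^T(Rg)) \f$. A dense Eigen illustration of the same computation on a tiny upper-triangular system (a sketch only, not the GTSAM code path above):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Small upper-triangular system (R, d), standing in for the Bayes net above
  Eigen::Matrix2d R;
  R << 2.0, 1.0,
       0.0, 3.0;
  Eigen::Vector2d d(1.0, -2.0);

  // Gradient of 1/2 |R dx - d|^2 evaluated at dx = 0 is -R^T d
  Eigen::Vector2d g = -(R.transpose() * d);

  // Minimizing step size, computed without forming G = R^T R
  Eigen::Vector2d Rg = R * g;
  double step = -g.dot(g) / Rg.dot(Rg);

  // Steepest-descent point
  Eigen::Vector2d dx = step * g;
  std::cout << "step = " << step << ", dx = " << dx.transpose() << std::endl;
  return 0;
}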
/* ************************************************************************* */
pair<Matrix,Vector> matrix(const GaussianBayesNet& bn) {

View File

@ -69,6 +69,43 @@ namespace gtsam {
*/
void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x);
/**
* Optimize along the gradient direction, with a closed-form computation to
* perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
*
* This function returns \f$ \delta x \f$ that minimizes a reparametrized
* problem. The error function of a GaussianBayesNet is
*
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
*
* with gradient and Hessian
*
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
*
* This function performs the line search in the direction of the
* gradient evaluated at \f$ \delta x = 0 \f$, i.e. \f$ g = g(0) \f$, with step size
* \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
*
* \f[ f(\alpha) = \frac{1}{2} d^T d + \alpha g^T g + \frac{1}{2} \alpha^2 g^T G g \f]
*
* Setting the derivative to zero yields
* \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
* evaluates the denominator without computing the Hessian \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} g \f]
*
* @param bn The GaussianBayesNet on which to perform this computation
* @return The resulting \f$ \delta x \f$ as described above
*/
VectorValues optimizeGradientSearch(const GaussianBayesNet& bn);
/** In-place version of optimizeGradientSearch(const GaussianBayesNet&) requiring pre-allocated VectorValues \c grad
*
* @param bn The GaussianBayesNet on which to perform this computation
* @param [out] grad The resulting \f$ \delta x \f$ as described in optimizeGradientSearch(const GaussianBayesNet&)
* */
void optimizeGradientSearchInPlace(const GaussianBayesNet& bn, VectorValues& grad);
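For reference, the step size quoted in the comment above follows from substituting \f$ \delta x = \alpha g \f$ into \f$ f \f$ and setting the derivative to zero; restated in LaTeX (no new assumptions, just the algebra):

\begin{align*}
  f(\alpha g) &= \tfrac{1}{2} d^T d + \alpha\, g^T g + \tfrac{1}{2}\alpha^2\, g^T R^T R\, g, \\
  \frac{d f}{d\alpha} &= g^T g + \alpha\, (R g)^T (R g) = 0
  \quad\Longrightarrow\quad
  \hat\alpha = \frac{-g^T g}{(R g)^T (R g)}, \qquad
  \delta x = \hat\alpha\, g.
\end{align*}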
/**
* Transpose Backsubstitute
* gy=inv(L)*gx by solving L*gy=gx.

View File

@ -25,31 +25,4 @@
namespace gtsam {
/* ************************************************************************* */
namespace internal {
template<class CLIQUE>
inline static void optimizeInPlace(const boost::shared_ptr<CLIQUE>& clique, VectorValues& result) {
// parents are assumed to already be solved and available in result
clique->conditional()->solveInPlace(result);
// starting from the root, call optimize on each conditional
BOOST_FOREACH(const boost::shared_ptr<CLIQUE>& child, clique->children_)
optimizeInPlace(child, result);
}
}
/* ************************************************************************* */
template<class CLIQUE>
VectorValues optimize(const BayesTree<GaussianConditional, CLIQUE>& bayesTree) {
VectorValues result = *allocateVectorValues(bayesTree);
internal::optimizeInPlace(bayesTree.root(), result);
return result;
}
/* ************************************************************************* */
template<class CLIQUE>
void optimizeInPlace(const BayesTree<GaussianConditional, CLIQUE>& bayesTree, VectorValues& result) {
internal::optimizeInPlace(bayesTree.root(), result);
}
}

View File

@ -0,0 +1,99 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file GaussianBayesTree.cpp
* @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
* @brief GaussianBayesTree
* @author Frank Dellaert
* @author Richard Roberts
*/
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/linear/JacobianFactor.h>
namespace gtsam {
/* ************************************************************************* */
namespace internal {
void optimizeInPlace(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& clique, VectorValues& result) {
// parents are assumed to already be solved and available in result
clique->conditional()->solveInPlace(result);
// starting from the root, call optimize on each conditional
BOOST_FOREACH(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& child, clique->children_)
optimizeInPlace(child, result);
}
}
/* ************************************************************************* */
VectorValues optimize(const GaussianBayesTree& bayesTree) {
VectorValues result = *allocateVectorValues(bayesTree);
internal::optimizeInPlace(bayesTree.root(), result);
return result;
}
/* ************************************************************************* */
VectorValues optimizeGradientSearch(const GaussianBayesTree& Rd) {
tic(0, "Allocate VectorValues");
VectorValues grad = *allocateVectorValues(Rd);
toc(0, "Allocate VectorValues");
optimizeGradientSearchInPlace(Rd, grad);
return grad;
}
/* ************************************************************************* */
void optimizeGradientSearchInPlace(const GaussianBayesTree& Rd, VectorValues& grad) {
tic(1, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
gradientAtZero(Rd, grad);
double gradientSqNorm = grad.dot(grad);
toc(1, "Compute Gradient");
tic(2, "Compute R*g");
// Compute R * g
FactorGraph<JacobianFactor> Rd_jfg(Rd);
Errors Rg = Rd_jfg * grad;
toc(2, "Compute R*g");
tic(3, "Compute minimizing step size");
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
toc(3, "Compute minimizing step size");
tic(4, "Compute point");
// Compute steepest descent point
scal(step, grad);
toc(4, "Compute point");
}
/* ************************************************************************* */
void optimizeInPlace(const GaussianBayesTree& bayesTree, VectorValues& result) {
internal::optimizeInPlace(bayesTree.root(), result);
}
/* ************************************************************************* */
VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
}
/* ************************************************************************* */
void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g) {
gradientAtZero(FactorGraph<JacobianFactor>(bayesTree), g);
}
}
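A brief usage sketch for the functions defined in this file (illustrative: it assumes a GaussianBayesTree bt built elsewhere, e.g. by eliminating a GaussianJunctionTree, and the solveAndDescend helper is hypothetical):

#include <gtsam/linear/GaussianBayesTree.h>

using namespace gtsam;

VectorValues solveAndDescend(const GaussianBayesTree& bt) {
  // Exact solution by recursive back-substitution from the root
  VectorValues xNewton = optimize(bt);

  // Steepest-descent point from the closed-form line search
  VectorValues xSteepest = optimizeGradientSearch(bt);

  // Gradient of the error function about the exact solution; at the minimizer
  // R x = d this is numerically close to zero
  VectorValues g = gradient(bt, xNewton);
  (void)g;
  return xSteepest;
}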

View File

@ -21,18 +21,73 @@
#include <gtsam/inference/BayesTree.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/GaussianFactor.h>
namespace gtsam {
typedef BayesTree<GaussianConditional> GaussianBayesTree;
/// optimize the BayesTree, starting from the root
-template<class CLIQUE>
-VectorValues optimize(const BayesTree<GaussianConditional, CLIQUE>& bayesTree);
+VectorValues optimize(const GaussianBayesTree& bayesTree);
/// recursively optimize this conditional and all subtrees
-template<class CLIQUE>
-void optimizeInPlace(const BayesTree<GaussianConditional, CLIQUE>& clique, VectorValues& result);
+void optimizeInPlace(const GaussianBayesTree& clique, VectorValues& result);
namespace internal {
void optimizeInPlace(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& clique, VectorValues& result);
}
/**
* Optimize along the gradient direction, with a closed-form computation to
* perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
*
* This function returns \f$ \delta x \f$ that minimizes a reparametrized
* problem. The error function of a GaussianBayesTree is
*
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
*
* with gradient and Hessian
*
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
*
* This function performs the line search in the direction of the
* gradient evaluated at \f$ \delta x = 0 \f$, i.e. \f$ g = g(0) \f$, with step size
* \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
*
* \f[ f(\alpha) = \frac{1}{2} d^T d + \alpha g^T g + \frac{1}{2} \alpha^2 g^T G g \f]
*
* Setting the derivative to zero yields
* \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
* evaluates the denominator without computing the Hessian \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} g \f]
*/
VectorValues optimizeGradientSearch(const GaussianBayesTree& bn);
/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c grad */
void optimizeGradientSearchInPlace(const GaussianBayesTree& bn, VectorValues& grad);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around \f$ x = x_0 \f$.
* The gradient is \f$ R^T(Rx-d) \f$.
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValues
*/
VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around zero.
* The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
* @param [out] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
*/
void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g);
}
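The preallocation pattern referred to by the gradientAtZero documentation above, as a sketch (the gradientAtZeroExample wrapper is hypothetical; allocateVectorValues may require an additional header):

#include <gtsam/linear/GaussianBayesTree.h>

using namespace gtsam;

void gradientAtZeroExample(const GaussianBayesTree& bt) {
  // Preallocate to the tree's variable dimensions, as gradientAtZero requires
  VectorValues g = *allocateVectorValues(bt);
  gradientAtZero(bt, g);   // fills g with -R^T d

  // The same buffer can then be reused, e.g. by the in-place gradient search
  optimizeGradientSearchInPlace(bt, g);
}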

View File

@ -43,7 +43,7 @@ class JacobianFactor;
/**
* A conditional Gaussian functions as the node in a Bayes network
* It has a set of parents y,z, etc. and implements a probability density on x.
-* The negative log-probability is given by \f$ |Rx - (d - Sy - Tz - ...)|^2 \f$
+* The negative log-probability is given by \f$ \frac{1}{2} |Rx - (d - Sy - Tz - ...)|^2 \f$
*/
class GaussianConditional : public IndexConditional {
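The change above adds the \f$ \frac{1}{2} \f$ factor to the documented negative log-probability. A tiny Eigen-only example evaluating that expression for a conditional with one parent (purely illustrative; this is not GaussianConditional's API):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // p(x | y) parameterized by (R, S, d): the negative log-probability is
  // 1/2 |R x - (d - S y)|^2, up to the normalization constant
  Eigen::Matrix2d R;  R << 1.0, 0.5,
                           0.0, 1.0;
  Eigen::Matrix2d S;  S << 0.2, 0.0,
                           0.0, 0.2;
  Eigen::Vector2d d(1.0, 2.0), x(0.5, 1.5), y(1.0, -1.0);

  double negLogP = 0.5 * (R * x - (d - S * y)).squaredNorm();
  std::cout << "negative log-probability (up to a constant): " << negLogP << std::endl;
  return 0;
}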

View File

@ -115,29 +115,6 @@ struct DoglegOptimizerImpl {
*/
static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& dx_u, const VectorValues& dx_n, const bool verbose=false);
/** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
* the function
* \f[
* M(\delta x) = (R \delta x - d)^T (R \delta x - d)
* \f]
* where \f$ R \f$ is an upper-triangular matrix and \f$ d \f$ is a vector.
* Together \f$ (R,d) \f$ are either a Bayes' net or a Bayes' tree.
*
* The same quadratic error function written as a Taylor expansion of the original
* non-linear error function is
* \f[
* M(\delta x) = f(x_0) + g(x_0)^T \delta x + \frac{1}{2} \delta x^T G(x_0) \delta x,
* \f]
* @tparam M The type of the Bayes' net or tree, currently
* either BayesNet<GaussianConditional> (or GaussianBayesNet) or BayesTree<GaussianConditional>.
* @param Rd The Bayes' net or tree \f$ (R,d) \f$ as described above, currently
* this must be of type BayesNet<GaussianConditional> (or GaussianBayesNet) or
* BayesTree<GaussianConditional>.
* @return The minimizer \f$ \delta x_u \f$ along the gradient descent direction.
*/
template<class M>
static VectorValues ComputeSteepestDescentPoint(const M& Rd);
/** Compute the point on the line between the steepest descent point and the
* Newton's method point intersecting the trust region boundary.
* Mathematically, computes \f$ \tau \f$ such that \f$ 0<\tau<1 \f$ and
@ -159,7 +136,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
// Compute steepest descent and Newton's method points
tic(0, "Steepest Descent");
-VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
+VectorValues dx_u = optimizeGradientSearch(Rd);
toc(0, "Steepest Descent");
tic(1, "optimize");
VectorValues dx_n = optimize(Rd);
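For context, the two points computed in this hunk are combined by ComputeDoglegPoint; a sketch of that call pattern (the doglegStep wrapper is hypothetical; timing and the rest of Iterate are omitted):

#include <gtsam/nonlinear/DoglegOptimizerImpl.h>

// Rd is the Bayes' net or tree (R, d) and Delta the trust-region radius, as in Iterate.
template<class M>
gtsam::VectorValues doglegStep(double Delta, const M& Rd) {
  gtsam::VectorValues dx_u = gtsam::optimizeGradientSearch(Rd); // steepest-descent point
  gtsam::VectorValues dx_n = gtsam::optimize(Rd);               // Newton's method point
  // Combine the two on the trust-region boundary of radius Delta
  return gtsam::DoglegOptimizerImpl::ComputeDoglegPoint(Delta, dx_u, dx_n);
}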
@ -274,33 +251,4 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
return result;
}
/* ************************************************************************* */
template<class M>
VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) {
tic(0, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
VectorValues grad = *allocateVectorValues(Rd);
gradientAtZero(Rd, grad);
double gradientSqNorm = grad.dot(grad);
toc(0, "Compute Gradient");
tic(1, "Compute R*g");
// Compute R * g
FactorGraph<JacobianFactor> Rd_jfg(Rd);
Errors Rg = Rd_jfg * grad;
toc(1, "Compute R*g");
tic(2, "Compute minimizing step size");
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
toc(2, "Compute minimizing step size");
tic(3, "Compute point");
// Compute steepest descent point
scal(step, grad);
toc(3, "Compute point");
return grad;
}
}

View File

@ -114,6 +114,44 @@ namespace gtsam {
return count;
}
/* ************************************************************************* */
template<class GRAPH>
VectorValues optimizeGradientSearch(const ISAM2<GaussianConditional, GRAPH>& isam) {
tic(0, "Allocate VectorValues");
VectorValues grad = *allocateVectorValues(isam);
toc(0, "Allocate VectorValues");
optimizeGradientSearchInPlace(isam, grad);
return grad;
}
/* ************************************************************************* */
template<class GRAPH>
void optimizeGradientSearchInPlace(const ISAM2<GaussianConditional, GRAPH>& Rd, VectorValues& grad) {
tic(1, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
gradientAtZero(Rd, grad);
double gradientSqNorm = grad.dot(grad);
toc(1, "Compute Gradient");
tic(2, "Compute R*g");
// Compute R * g
FactorGraph<JacobianFactor> Rd_jfg(Rd);
Errors Rg = Rd_jfg * grad;
toc(2, "Compute R*g");
tic(3, "Compute minimizing step size");
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
toc(3, "Compute minimizing step size");
tic(4, "Compute point");
// Compute steepest descent point
scal(step, grad);
toc(4, "Compute point");
}
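These templates give ISAM2 the same calling pattern as the GaussianBayesNet and GaussianBayesTree versions; a hypothetical wrapper illustrating it (allocateVectorValues may require an additional header):

#include <gtsam/nonlinear/ISAM2.h>

// Returns the steepest-descent delta for an ISAM2 instance, using the in-place
// overload defined above with a pre-allocated buffer.
template<class GRAPH>
gtsam::VectorValues steepestDescentDelta(const gtsam::ISAM2<gtsam::GaussianConditional, GRAPH>& isam) {
  gtsam::VectorValues grad = *gtsam::allocateVectorValues(isam);
  gtsam::optimizeGradientSearchInPlace(isam, grad);
  return grad;
}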
/* ************************************************************************* */
template<class CLIQUE>
void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {

View File

@ -26,16 +26,6 @@ using namespace gtsam;
namespace gtsam {
/* ************************************************************************* */
VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
}
/* ************************************************************************* */
void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g) {
gradientAtZero(FactorGraph<JacobianFactor>(bayesTree), g);
}
/* ************************************************************************* */
VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);

View File

@ -63,44 +63,65 @@ public:
};
-/// optimize the BayesTree, starting from the root; "replaced" needs to contain
+/** Get the linear delta for the ISAM2 object, unpermuting the delta returned by ISAM2::getDelta() */
+template<class GRAPH>
+VectorValues optimize(const ISAM2<GaussianConditional, GRAPH>& isam) {
+VectorValues delta = *allocateVectorValues(isam);
+internal::optimizeInPlace(isam.root(), delta);
+return delta;
+}
+/// Optimize the BayesTree, starting from the root.
+/// @param replaced Needs to contain
/// all variables that are contained in the top of the Bayes tree that has been
-/// redone; "delta" is the current solution, an offset from the linearization
-/// point; "threshold" is the maximum change against the PREVIOUS delta for
+/// redone.
+/// @param delta The current solution, an offset from the linearization
+/// point.
+/// @param threshold The maximum change against the PREVIOUS delta for
/// non-replaced variables that can be ignored, ie. the old delta entry is kept
/// and recursive backsubstitution might eventually stop if none of the changed
/// variables are contained in the subtree.
-/// returns the number of variables that were solved for
+/// @return The number of variables that were solved for
template<class CLIQUE>
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root,
double threshold, const std::vector<bool>& replaced, Permuted<VectorValues>& delta);
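A sketch of how optimizeWildfire is typically driven (the updateDeltaSketch wrapper is hypothetical; the argument names follow ISAM2::Impl::UpdateDelta later in this commit):

#include <gtsam/nonlinear/ISAM2.h>
#include <vector>

template<class CONDITIONAL, class GRAPH>
void updateDeltaSketch(const gtsam::ISAM2<CONDITIONAL, GRAPH>& isam,
                       const std::vector<bool>& replacedKeys,
                       gtsam::Permuted<gtsam::VectorValues>& delta,
                       double wildfireThreshold) {
  // Partial back-substitution: recursion may stop early in subtrees that contain
  // no replaced variables and whose delta changes stay below the threshold.
  int nSolved = gtsam::optimizeWildfire(isam.root(), wildfireThreshold, replacedKeys, delta);
  (void)nSolved;  // number of variables solved for, per the documentation above
}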
/**
* Optimize along the gradient direction, with a closed-form computation to
* perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
*
* This function returns \f$ \delta x \f$ that minimizes a reparametrized
* problem. The error function of the Bayes' tree \f$ (R,d) \f$ underlying the ISAM2 object is
*
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
*
* with gradient and Hessian
*
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
*
* This function performs the line search in the direction of the
* gradient evaluated at \f$ \delta x = 0 \f$, i.e. \f$ g = g(0) \f$, with step size
* \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
*
* \f[ f(\alpha) = \frac{1}{2} d^T d + \alpha g^T g + \frac{1}{2} \alpha^2 g^T G g \f]
*
* Setting the derivative to zero yields
* \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
* evaluates the denominator without computing the Hessian \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} g \f]
*/
template<class GRAPH>
VectorValues optimizeGradientSearch(const ISAM2<GaussianConditional, GRAPH>& isam);
/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c grad */
template<class GRAPH>
void optimizeGradientSearchInPlace(const ISAM2<GaussianConditional, GRAPH>& isam, VectorValues& grad);
/// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix)
template<class CLIQUE>
int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around \f$ x = x_0 \f$.
* The gradient is \f$ R^T(Rx-d) \f$.
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValues
*/
VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around zero.
* The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
* @param [out] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
*/
void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,

View File

@ -361,6 +361,18 @@ ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
return result;
}
/* ************************************************************************* */
namespace internal {
inline static void optimizeInPlace(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& clique, VectorValues& result) {
// parents are assumed to already be solved and available in result
clique->conditional()->solveInPlace(result);
// starting from the root, call optimize on each conditional
BOOST_FOREACH(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& child, clique->children_)
optimizeInPlace(child, result);
}
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
size_t ISAM2<CONDITIONAL,GRAPH>::Impl::UpdateDelta(const boost::shared_ptr<ISAM2Clique<CONDITIONAL> >& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold) {

View File

@ -557,7 +557,6 @@ ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
return result;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
void ISAM2<CONDITIONAL, GRAPH>::updateDelta(bool forceFullSolve) const {
@ -630,13 +629,5 @@ const Permuted<VectorValues>& ISAM2<CONDITIONAL, GRAPH>::getDelta() const {
return delta_;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
VectorValues optimize(const ISAM2<CONDITIONAL, GRAPH>& isam) {
VectorValues delta = *allocateVectorValues(isam);
internal::optimizeInPlace(isam.root(), delta);
return delta;
}
}
/// namespace gtsam

View File

@ -462,10 +462,6 @@ private:
}; // ISAM2
/** Get the linear delta for the ISAM2 object, unpermuted the delta returned by ISAM2::getDelta() */
template<class CONDITIONAL, class GRAPH>
VectorValues optimize(const ISAM2<CONDITIONAL, GRAPH>& isam);
} /// namespace gtsam
#include <gtsam/nonlinear/ISAM2-inl.h>

View File

@ -20,6 +20,7 @@
#include <gtsam/inference/BayesTree-inl.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
#include <gtsam/slam/pose2SLAM.h>
#include <gtsam/slam/smallExample.h>
@ -102,7 +103,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPoint) {
VectorValues expected = gradientValues; scal(step, expected);
// Compute the steepest descent point with the dogleg function
-VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn);
+VectorValues actual = optimizeGradientSearch(gbn);
// Check that points agree
EXPECT(assert_equal(expected, actual, 1e-5));
@ -290,7 +291,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPointBT) {
expectedFromBN[4] = Vector_(2, 0.300134, 0.423233);
// Compute the steepest descent point with the dogleg function
-VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(bt);
+VectorValues actual = optimizeGradientSearch(bt);
// Check that points agree
EXPECT(assert_equal(expected, actual, 1e-5));
@ -324,7 +325,7 @@ TEST(DoglegOptimizer, ComputeBlend) {
4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2)));
// Compute steepest descent point
-VectorValues xu = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn);
+VectorValues xu = optimizeGradientSearch(gbn);
// Compute Newton's method point
VectorValues xn = optimize(gbn);
@ -362,18 +363,18 @@ TEST(DoglegOptimizer, ComputeDoglegPoint) {
// Compute dogleg point for different deltas
double Delta1 = 0.5; // Less than steepest descent
-VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
+VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, optimizeGradientSearch(gbn), optimize(gbn));
DOUBLES_EQUAL(Delta1, actual1.vector().norm(), 1e-5);
double Delta2 = 1.5; // Between steepest descent and Newton's method
-VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
+VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, optimizeGradientSearch(gbn), optimize(gbn));
-VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
+VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, optimizeGradientSearch(gbn), optimize(gbn));
DOUBLES_EQUAL(Delta2, actual2.vector().norm(), 1e-5);
EXPECT(assert_equal(expected2, actual2));
double Delta3 = 5.0; // Larger than Newton's method point
VectorValues expected3 = optimize(gbn);
-VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn));
+VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, optimizeGradientSearch(gbn), optimize(gbn));
EXPECT(assert_equal(expected3, actual3));
}