From e3016baf1bde9ca1bf2b030378306ff79794c743 Mon Sep 17 00:00:00 2001 From: Richard Roberts Date: Fri, 16 Mar 2012 16:16:27 +0000 Subject: [PATCH] Consistent 'optimize', 'optimizeInPlace', 'optimizeGradientSearch', and 'optimizeGradientSearchInPlace' functions for GBN, GBT, and ISAM2. Reorganized some existing ones and added some new ones to do this. --- gtsam/linear/GaussianBayesNet.cpp | 36 ++++++++++ gtsam/linear/GaussianBayesNet.h | 37 ++++++++++ gtsam/linear/GaussianBayesTree-inl.h | 27 -------- gtsam/linear/GaussianBayesTree.cpp | 99 +++++++++++++++++++++++++++ gtsam/linear/GaussianBayesTree.h | 63 +++++++++++++++-- gtsam/linear/GaussianConditional.h | 2 +- gtsam/nonlinear/DoglegOptimizerImpl.h | 54 +-------------- gtsam/nonlinear/GaussianISAM2-inl.h | 38 ++++++++++ gtsam/nonlinear/GaussianISAM2.cpp | 10 --- gtsam/nonlinear/GaussianISAM2.h | 73 +++++++++++++------- gtsam/nonlinear/ISAM2-impl-inl.h | 12 ++++ gtsam/nonlinear/ISAM2-inl.h | 9 --- gtsam/nonlinear/ISAM2.h | 4 -- tests/testDoglegOptimizer.cpp | 15 ++-- 14 files changed, 338 insertions(+), 141 deletions(-) create mode 100644 gtsam/linear/GaussianBayesTree.cpp diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index 13a3d4e0e..5721a4760 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -122,6 +122,42 @@ VectorValues backSubstituteTranspose(const GaussianBayesNet& bn, return gy; } +/* ************************************************************************* */ +VectorValues optimizeGradientSearch(const GaussianBayesNet& Rd) { + tic(0, "Allocate VectorValues"); + VectorValues grad = *allocateVectorValues(Rd); + toc(0, "Allocate VectorValues"); + + optimizeGradientSearchInPlace(Rd, grad); + + return grad; +} + +/* ************************************************************************* */ +void optimizeGradientSearchInPlace(const GaussianBayesNet& Rd, VectorValues& grad) { + tic(1, "Compute Gradient"); + // Compute gradient (call gradientAtZero function, which is defined for various linear systems) + gradientAtZero(Rd, grad); + double gradientSqNorm = grad.dot(grad); + toc(1, "Compute Gradient"); + + tic(2, "Compute R*g"); + // Compute R * g + FactorGraph Rd_jfg(Rd); + Errors Rg = Rd_jfg * grad; + toc(2, "Compute R*g"); + + tic(3, "Compute minimizing step size"); + // Compute minimizing step size + double step = -gradientSqNorm / dot(Rg, Rg); + toc(3, "Compute minimizing step size"); + + tic(4, "Compute point"); + // Compute steepest descent point + scal(step, grad); + toc(4, "Compute point"); +} + /* ************************************************************************* */ pair matrix(const GaussianBayesNet& bn) { diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h index dda579741..1ef9081d4 100644 --- a/gtsam/linear/GaussianBayesNet.h +++ b/gtsam/linear/GaussianBayesNet.h @@ -69,6 +69,43 @@ namespace gtsam { */ void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x); + /** + * Optimize along the gradient direction, with a closed-form computation to + * perform the line search. The gradient is computed about \f$ \delta x=0 \f$. + * + * This function returns \f$ \delta x \f$ that minimizes a reparametrized + * problem. The error function of a GaussianBayesNet is + * + * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f] + * + * with gradient and Hessian + * + * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. 
\f] + * + * This function performs the line search in the direction of the + * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size + * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$: + * + * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f] + * + * Optimizing by setting the derivative to zero yields + * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function + * evaluates the denominator without computing the Hessian \f$ G \f$, returning + * + * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f] + * + * @param bn The GaussianBayesNet on which to perform this computation + * @return The resulting \f$ \delta x \f$ as described above + */ + VectorValues optimizeGradientSearch(const GaussianBayesNet& bn); + + /** In-place version of optimizeGradientSearch(const GaussianBayesNet&) requiring pre-allocated VectorValues \c grad + * + * @param bn The GaussianBayesNet on which to perform this computation + * @param [out] grad The resulting \f$ \delta x \f$ as described in optimizeGradientSearch(const GaussianBayesNet&) + * */ + void optimizeGradientSearchInPlace(const GaussianBayesNet& bn, VectorValues& grad); + /** * Transpose Backsubstitute * gy=inv(L)*gx by solving L*gy=gx. diff --git a/gtsam/linear/GaussianBayesTree-inl.h b/gtsam/linear/GaussianBayesTree-inl.h index 1b936b638..9cb327bec 100644 --- a/gtsam/linear/GaussianBayesTree-inl.h +++ b/gtsam/linear/GaussianBayesTree-inl.h @@ -25,31 +25,4 @@ namespace gtsam { -/* ************************************************************************* */ -namespace internal { -template -inline static void optimizeInPlace(const boost::shared_ptr& clique, VectorValues& result) { - // parents are assumed to already be solved and available in result - clique->conditional()->solveInPlace(result); - - // starting from the root, call optimize on each conditional - BOOST_FOREACH(const boost::shared_ptr& child, clique->children_) - optimizeInPlace(child, result); -} -} - -/* ************************************************************************* */ -template -VectorValues optimize(const BayesTree& bayesTree) { - VectorValues result = *allocateVectorValues(bayesTree); - internal::optimizeInPlace(bayesTree.root(), result); - return result; -} - -/* ************************************************************************* */ -template -void optimizeInPlace(const BayesTree& bayesTree, VectorValues& result) { - internal::optimizeInPlace(bayesTree.root(), result); -} - } diff --git a/gtsam/linear/GaussianBayesTree.cpp b/gtsam/linear/GaussianBayesTree.cpp new file mode 100644 index 000000000..4243657e5 --- /dev/null +++ b/gtsam/linear/GaussianBayesTree.cpp @@ -0,0 +1,99 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GaussianBayesTree.cpp + * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree + * @brief GaussianBayesTree + * @author Frank Dellaert + * @author Richard Roberts + */ + +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +namespace internal { +void optimizeInPlace(const boost::shared_ptr >& clique, VectorValues& result) { + // parents are assumed to already be solved and available in result + clique->conditional()->solveInPlace(result); + + // starting from the root, call optimize on each conditional + BOOST_FOREACH(const boost::shared_ptr >& child, clique->children_) + optimizeInPlace(child, result); +} +} + +/* ************************************************************************* */ +VectorValues optimize(const GaussianBayesTree& bayesTree) { + VectorValues result = *allocateVectorValues(bayesTree); + internal::optimizeInPlace(bayesTree.root(), result); + return result; +} + +/* ************************************************************************* */ +VectorValues optimizeGradientSearch(const GaussianBayesTree& Rd) { + tic(0, "Allocate VectorValues"); + VectorValues grad = *allocateVectorValues(Rd); + toc(0, "Allocate VectorValues"); + + optimizeGradientSearchInPlace(Rd, grad); + + return grad; +} + +/* ************************************************************************* */ +void optimizeGradientSearchInPlace(const GaussianBayesTree& Rd, VectorValues& grad) { + tic(1, "Compute Gradient"); + // Compute gradient (call gradientAtZero function, which is defined for various linear systems) + gradientAtZero(Rd, grad); + double gradientSqNorm = grad.dot(grad); + toc(1, "Compute Gradient"); + + tic(2, "Compute R*g"); + // Compute R * g + FactorGraph Rd_jfg(Rd); + Errors Rg = Rd_jfg * grad; + toc(2, "Compute R*g"); + + tic(3, "Compute minimizing step size"); + // Compute minimizing step size + double step = -gradientSqNorm / dot(Rg, Rg); + toc(3, "Compute minimizing step size"); + + tic(4, "Compute point"); + // Compute steepest descent point + scal(step, grad); + toc(4, "Compute point"); +} + +/* ************************************************************************* */ +void optimizeInPlace(const GaussianBayesTree& bayesTree, VectorValues& result) { + internal::optimizeInPlace(bayesTree.root(), result); +} + +/* ************************************************************************* */ +VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0) { + return gradient(FactorGraph(bayesTree), x0); +} + +/* ************************************************************************* */ +void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g) { + gradientAtZero(FactorGraph(bayesTree), g); +} + +} + + + + diff --git a/gtsam/linear/GaussianBayesTree.h b/gtsam/linear/GaussianBayesTree.h index 227cc94f9..bb15eb063 100644 --- a/gtsam/linear/GaussianBayesTree.h +++ b/gtsam/linear/GaussianBayesTree.h @@ -21,18 +21,73 @@ #include #include +#include namespace gtsam { typedef BayesTree GaussianBayesTree; /// optimize the BayesTree, starting from the root -template -VectorValues optimize(const BayesTree& bayesTree); +VectorValues optimize(const GaussianBayesTree& bayesTree); /// recursively optimize this conditional and all subtrees -template -void optimizeInPlace(const BayesTree& 
clique, VectorValues& result); +void optimizeInPlace(const GaussianBayesTree& clique, VectorValues& result); + +namespace internal { +void optimizeInPlace(const boost::shared_ptr >& clique, VectorValues& result); +} + +/** + * Optimize along the gradient direction, with a closed-form computation to + * perform the line search. The gradient is computed about \f$ \delta x=0 \f$. + * + * This function returns \f$ \delta x \f$ that minimizes a reparametrized + * problem. The error function of a GaussianBayesNet is + * + * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f] + * + * with gradient and Hessian + * + * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f] + * + * This function performs the line search in the direction of the + * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size + * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$: + * + * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f] + * + * Optimizing by setting the derivative to zero yields + * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function + * evaluates the denominator without computing the Hessian \f$ G \f$, returning + * + * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f] + */ +VectorValues optimizeGradientSearch(const GaussianBayesTree& bn); + +/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */ +void optimizeGradientSearchInPlace(const GaussianBayesTree& bn, VectorValues& grad); + +/** + * Compute the gradient of the energy function, + * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$, + * centered around \f$ x = x_0 \f$. + * The gradient is \f$ R^T(Rx-d) \f$. + * @param bayesTree The Gaussian Bayes Tree $(R,d)$ + * @param x0 The center about which to compute the gradient + * @return The gradient as a VectorValues + */ +VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0); + +/** + * Compute the gradient of the energy function, + * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$, + * centered around zero. + * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&). + * @param bayesTree The Gaussian Bayes Tree $(R,d)$ + * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues + * @return The gradient as a VectorValues + */ +void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g); } diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 248ac7a9e..b33098061 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -43,7 +43,7 @@ class JacobianFactor; /** * A conditional Gaussian functions as the node in a Bayes network * It has a set of parents y,z, etc. and implements a probability density on x. 
- * The negative log-probability is given by \f$ |Rx - (d - Sy - Tz - ...)|^2 \f$ + * The negative log-probability is given by \f$ \frac{1}{2} |Rx - (d - Sy - Tz - ...)|^2 \f$ */ class GaussianConditional : public IndexConditional { diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.h b/gtsam/nonlinear/DoglegOptimizerImpl.h index af1e345b6..5026a155f 100644 --- a/gtsam/nonlinear/DoglegOptimizerImpl.h +++ b/gtsam/nonlinear/DoglegOptimizerImpl.h @@ -115,29 +115,6 @@ struct DoglegOptimizerImpl { */ static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& dx_u, const VectorValues& dx_n, const bool verbose=false); - /** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of - * the function - * \f[ - * M(\delta x) = (R \delta x - d)^T (R \delta x - d) - * \f] - * where \f$ R \f$ is an upper-triangular matrix and \f$ d \f$ is a vector. - * Together \f$ (R,d) \f$ are either a Bayes' net or a Bayes' tree. - * - * The same quadratic error function written as a Taylor expansion of the original - * non-linear error function is - * \f[ - * M(\delta x) = f(x_0) + g(x_0) + \frac{1}{2} \delta x^T G(x_0) \delta x, - * \f] - * @tparam M The type of the Bayes' net or tree, currently - * either BayesNet (or GaussianBayesNet) or BayesTree. - * @param Rd The Bayes' net or tree \f$ (R,d) \f$ as described above, currently - * this must be of type BayesNet (or GaussianBayesNet) or - * BayesTree. - * @return The minimizer \f$ \delta x_u \f$ along the gradient descent direction. - */ - template - static VectorValues ComputeSteepestDescentPoint(const M& Rd); - /** Compute the point on the line between the steepest descent point and the * Newton's method point intersecting the trust region boundary. * Mathematically, computes \f$ \tau \f$ such that \f$ 0<\tau<1 \f$ and @@ -159,7 +136,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate( // Compute steepest descent and Newton's method points tic(0, "Steepest Descent"); - VectorValues dx_u = ComputeSteepestDescentPoint(Rd); + VectorValues dx_u = optimizeGradientSearch(Rd); toc(0, "Steepest Descent"); tic(1, "optimize"); VectorValues dx_n = optimize(Rd); @@ -274,33 +251,4 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate( return result; } -/* ************************************************************************* */ -template -VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) { - - tic(0, "Compute Gradient"); - // Compute gradient (call gradientAtZero function, which is defined for various linear systems) - VectorValues grad = *allocateVectorValues(Rd); - gradientAtZero(Rd, grad); - double gradientSqNorm = grad.dot(grad); - toc(0, "Compute Gradient"); - - tic(1, "Compute R*g"); - // Compute R * g - FactorGraph Rd_jfg(Rd); - Errors Rg = Rd_jfg * grad; - toc(1, "Compute R*g"); - - tic(2, "Compute minimizing step size"); - // Compute minimizing step size - double step = -gradientSqNorm / dot(Rg, Rg); - toc(2, "Compute minimizing step size"); - - tic(3, "Compute point"); - // Compute steepest descent point - scal(step, grad); - toc(3, "Compute point"); - return grad; -} - } diff --git a/gtsam/nonlinear/GaussianISAM2-inl.h b/gtsam/nonlinear/GaussianISAM2-inl.h index b52e22629..57801fbe4 100644 --- a/gtsam/nonlinear/GaussianISAM2-inl.h +++ b/gtsam/nonlinear/GaussianISAM2-inl.h @@ -114,6 +114,44 @@ namespace gtsam { return count; } + /* ************************************************************************* */ + template + 
VectorValues optimizeGradientSearch(const ISAM2& isam) { + tic(0, "Allocate VectorValues"); + VectorValues grad = *allocateVectorValues(isam); + toc(0, "Allocate VectorValues"); + + optimizeGradientSearchInPlace(isam, grad); + + return grad; + } + + /* ************************************************************************* */ + template + void optimizeGradientSearchInPlace(const ISAM2& Rd, VectorValues& grad) { + tic(1, "Compute Gradient"); + // Compute gradient (call gradientAtZero function, which is defined for various linear systems) + gradientAtZero(Rd, grad); + double gradientSqNorm = grad.dot(grad); + toc(1, "Compute Gradient"); + + tic(2, "Compute R*g"); + // Compute R * g + FactorGraph Rd_jfg(Rd); + Errors Rg = Rd_jfg * grad; + toc(2, "Compute R*g"); + + tic(3, "Compute minimizing step size"); + // Compute minimizing step size + double step = -gradientSqNorm / dot(Rg, Rg); + toc(3, "Compute minimizing step size"); + + tic(4, "Compute point"); + // Compute steepest descent point + scal(step, grad); + toc(4, "Compute point"); + } + /* ************************************************************************* */ template void nnz_internal(const boost::shared_ptr& clique, int& result) { diff --git a/gtsam/nonlinear/GaussianISAM2.cpp b/gtsam/nonlinear/GaussianISAM2.cpp index 34456ad36..624c5d4f0 100644 --- a/gtsam/nonlinear/GaussianISAM2.cpp +++ b/gtsam/nonlinear/GaussianISAM2.cpp @@ -26,16 +26,6 @@ using namespace gtsam; namespace gtsam { - /* ************************************************************************* */ - VectorValues gradient(const BayesTree& bayesTree, const VectorValues& x0) { - return gradient(FactorGraph(bayesTree), x0); - } - - /* ************************************************************************* */ - void gradientAtZero(const BayesTree& bayesTree, VectorValues& g) { - gradientAtZero(FactorGraph(bayesTree), g); - } - /* ************************************************************************* */ VectorValues gradient(const BayesTree >& bayesTree, const VectorValues& x0) { return gradient(FactorGraph(bayesTree), x0); diff --git a/gtsam/nonlinear/GaussianISAM2.h b/gtsam/nonlinear/GaussianISAM2.h index 7b2aa625d..a602d81a6 100644 --- a/gtsam/nonlinear/GaussianISAM2.h +++ b/gtsam/nonlinear/GaussianISAM2.h @@ -63,44 +63,65 @@ public: }; -/// optimize the BayesTree, starting from the root; "replaced" needs to contain +/** Get the linear delta for the ISAM2 object, unpermuted the delta returned by ISAM2::getDelta() */ +template +VectorValues optimize(const ISAM2& isam) { + VectorValues delta = *allocateVectorValues(isam); + internal::optimizeInPlace(isam.root(), delta); + return delta; +} + +/// Optimize the BayesTree, starting from the root. +/// @param replaced Needs to contain /// all variables that are contained in the top of the Bayes tree that has been -/// redone; "delta" is the current solution, an offset from the linearization -/// point; "threshold" is the maximum change against the PREVIOUS delta for +/// redone. +/// @param delta The current solution, an offset from the linearization +/// point. +/// @param threshold The maximum change against the PREVIOUS delta for /// non-replaced variables that can be ignored, ie. the old delta entry is kept /// and recursive backsubstitution might eventually stop if none of the changed /// variables are contained in the subtree. 
-/// returns the number of variables that were solved for +/// @return The number of variables that were solved for template int optimizeWildfire(const boost::shared_ptr& root, double threshold, const std::vector& replaced, Permuted& delta); +/** + * Optimize along the gradient direction, with a closed-form computation to + * perform the line search. The gradient is computed about \f$ \delta x=0 \f$. + * + * This function returns \f$ \delta x \f$ that minimizes a reparametrized + * problem. The error function of a GaussianBayesNet is + * + * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f] + * + * with gradient and Hessian + * + * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f] + * + * This function performs the line search in the direction of the + * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size + * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$: + * + * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f] + * + * Optimizing by setting the derivative to zero yields + * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function + * evaluates the denominator without computing the Hessian \f$ G \f$, returning + * + * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f] + */ +template +VectorValues optimizeGradientSearch(const ISAM2& isam); + +/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */ +template +void optimizeGradientSearchInPlace(const ISAM2& isam, VectorValues& grad); + /// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix) template int calculate_nnz(const boost::shared_ptr& clique); -/** - * Compute the gradient of the energy function, - * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$, - * centered around \f$ x = x_0 \f$. - * The gradient is \f$ R^T(Rx-d) \f$. - * @param bayesTree The Gaussian Bayes Tree $(R,d)$ - * @param x0 The center about which to compute the gradient - * @return The gradient as a VectorValues - */ -VectorValues gradient(const BayesTree& bayesTree, const VectorValues& x0); - -/** - * Compute the gradient of the energy function, - * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$, - * centered around zero. - * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&). 
- * @param bayesTree The Gaussian Bayes Tree $(R,d)$ - * @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues - * @return The gradient as a VectorValues - */ -void gradientAtZero(const BayesTree& bayesTree, VectorValues& g); - /** * Compute the gradient of the energy function, * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$, diff --git a/gtsam/nonlinear/ISAM2-impl-inl.h b/gtsam/nonlinear/ISAM2-impl-inl.h index 08b73df21..40e87a09b 100644 --- a/gtsam/nonlinear/ISAM2-impl-inl.h +++ b/gtsam/nonlinear/ISAM2-impl-inl.h @@ -361,6 +361,18 @@ ISAM2::Impl::PartialSolve(GaussianFactorGraph& factors, return result; } +/* ************************************************************************* */ +namespace internal { +inline static void optimizeInPlace(const boost::shared_ptr >& clique, VectorValues& result) { + // parents are assumed to already be solved and available in result + clique->conditional()->solveInPlace(result); + + // starting from the root, call optimize on each conditional + BOOST_FOREACH(const boost::shared_ptr >& child, clique->children_) + optimizeInPlace(child, result); +} +} + /* ************************************************************************* */ template size_t ISAM2::Impl::UpdateDelta(const boost::shared_ptr >& root, std::vector& replacedKeys, Permuted& delta, double wildfireThreshold) { diff --git a/gtsam/nonlinear/ISAM2-inl.h b/gtsam/nonlinear/ISAM2-inl.h index 674496512..0d5f190a3 100644 --- a/gtsam/nonlinear/ISAM2-inl.h +++ b/gtsam/nonlinear/ISAM2-inl.h @@ -557,7 +557,6 @@ ISAM2Result ISAM2::update( return result; } - /* ************************************************************************* */ template void ISAM2::updateDelta(bool forceFullSolve) const { @@ -630,13 +629,5 @@ const Permuted& ISAM2::getDelta() const { return delta_; } -/* ************************************************************************* */ -template -VectorValues optimize(const ISAM2& isam) { - VectorValues delta = *allocateVectorValues(isam); - internal::optimizeInPlace(isam.root(), delta); - return delta; -} - } /// namespace gtsam diff --git a/gtsam/nonlinear/ISAM2.h b/gtsam/nonlinear/ISAM2.h index 1117a1a48..208f0748f 100644 --- a/gtsam/nonlinear/ISAM2.h +++ b/gtsam/nonlinear/ISAM2.h @@ -462,10 +462,6 @@ private: }; // ISAM2 -/** Get the linear delta for the ISAM2 object, unpermuted the delta returned by ISAM2::getDelta() */ -template -VectorValues optimize(const ISAM2& isam); - } /// namespace gtsam #include diff --git a/tests/testDoglegOptimizer.cpp b/tests/testDoglegOptimizer.cpp index 27070b801..b16ac0cdc 100644 --- a/tests/testDoglegOptimizer.cpp +++ b/tests/testDoglegOptimizer.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -102,7 +103,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPoint) { VectorValues expected = gradientValues; scal(step, expected); // Compute the steepest descent point with the dogleg function - VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn); + VectorValues actual = optimizeGradientSearch(gbn); // Check that points agree EXPECT(assert_equal(expected, actual, 1e-5)); @@ -290,7 +291,7 @@ TEST(DoglegOptimizer, ComputeSteepestDescentPointBT) { expectedFromBN[4] = Vector_(2, 0.300134, 0.423233); // Compute the steepest descent point with the dogleg function - VectorValues actual = DoglegOptimizerImpl::ComputeSteepestDescentPoint(bt); + VectorValues actual = optimizeGradientSearch(bt); // Check that points agree 
EXPECT(assert_equal(expected, actual, 1e-5)); @@ -324,7 +325,7 @@ TEST(DoglegOptimizer, ComputeBlend) { 4, Vector_(2, 49.0,50.0), Matrix_(2,2, 51.0,52.0,0.0,54.0), ones(2))); // Compute steepest descent point - VectorValues xu = DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn); + VectorValues xu = optimizeGradientSearch(gbn); // Compute Newton's method point VectorValues xn = optimize(gbn); @@ -362,18 +363,18 @@ TEST(DoglegOptimizer, ComputeDoglegPoint) { // Compute dogleg point for different deltas double Delta1 = 0.5; // Less than steepest descent - VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn)); + VectorValues actual1 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta1, optimizeGradientSearch(gbn), optimize(gbn)); DOUBLES_EQUAL(Delta1, actual1.vector().norm(), 1e-5); double Delta2 = 1.5; // Between steepest descent and Newton's method - VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn)); - VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn)); + VectorValues expected2 = DoglegOptimizerImpl::ComputeBlend(Delta2, optimizeGradientSearch(gbn), optimize(gbn)); + VectorValues actual2 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta2, optimizeGradientSearch(gbn), optimize(gbn)); DOUBLES_EQUAL(Delta2, actual2.vector().norm(), 1e-5); EXPECT(assert_equal(expected2, actual2)); double Delta3 = 5.0; // Larger than Newton's method point VectorValues expected3 = optimize(gbn); - VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, DoglegOptimizerImpl::ComputeSteepestDescentPoint(gbn), optimize(gbn)); + VectorValues actual3 = DoglegOptimizerImpl::ComputeDoglegPoint(Delta3, optimizeGradientSearch(gbn), optimize(gbn)); EXPECT(assert_equal(expected3, actual3)); }
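
A brief note for readers of this patch (illustrative, not part of the diff): the closed-form step size used by optimizeGradientSearch, repeated in the doc comments above, follows from \f$ f(\alpha) = \frac{1}{2} d^T d + \alpha\, g^T g + \frac{1}{2}\alpha^2\, g^T G g \f$ with \f$ G = R^T R \f$. Setting \f$ df/d\alpha = 0 \f$ gives \f$ \hat\alpha = -g^T g / (g^T G g) \f$, and the denominator is evaluated without forming \f$ G \f$ as \f$ (R g)^T (R g) \f$, so the returned point is \f$ \delta x = \hat\alpha\, g \f$. This matches the implementations above, which compute step = -gradientSqNorm / dot(Rg, Rg) and then scale the gradient by it.

The sketch below shows how the unified entry points introduced here would be called for a GaussianBayesNet; the same four functions exist for GaussianBayesTree and ISAM2 per the headers above. It assumes an already-eliminated Bayes net; the helper name exampleGradientSearch and the explicit VectorValues.h include are illustrative assumptions, not part of the patch.

// Minimal usage sketch (assumptions noted above).
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/VectorValues.h>

using namespace gtsam;

void exampleGradientSearch(const GaussianBayesNet& gbn) {
  // Newton's method point: full back-substitution of R*dx = d.
  VectorValues dxNewton = optimize(gbn);

  // Steepest-descent (Cauchy) point: gradient direction scaled by the
  // closed-form step size described above.
  VectorValues dxCauchy = optimizeGradientSearch(gbn);

  // In-place variants write into pre-allocated storage.
  VectorValues newtonBuf = *allocateVectorValues(gbn);
  VectorValues gradBuf   = *allocateVectorValues(gbn);
  optimizeInPlace(gbn, newtonBuf);              // newtonBuf now holds the Newton point
  optimizeGradientSearchInPlace(gbn, gradBuf);  // gradBuf now holds the steepest-descent point
}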