From 23ef1cf084420a46923b8c5c138a44be13cca299 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sat, 3 Sep 2011 04:48:06 +0000
Subject: [PATCH] Formatting and comments

---
 gtsam/nonlinear/ExtendedKalmanFilter-inl.h | 195 ++++++++++++---------
 gtsam/nonlinear/ExtendedKalmanFilter.h     |  66 +++----
 2 files changed, 141 insertions(+), 120 deletions(-)

diff --git a/gtsam/nonlinear/ExtendedKalmanFilter-inl.h b/gtsam/nonlinear/ExtendedKalmanFilter-inl.h
index cbb7a0af7..63be2960c 100644
--- a/gtsam/nonlinear/ExtendedKalmanFilter-inl.h
+++ b/gtsam/nonlinear/ExtendedKalmanFilter-inl.h
@@ -19,7 +19,6 @@
 #pragma once

 #include
-#include
 #include
 #include
 #include
@@ -27,120 +26,142 @@ namespace gtsam {

-// Function to compare Ordering entries by value instead of by key
-bool compareOrderingValues(const Ordering::value_type& i, const Ordering::value_type& j) {
-  return i.second < j.second;
-}
+  // Function to compare Ordering entries by value instead of by key
+  bool compareOrderingValues(const Ordering::value_type& i,
+      const Ordering::value_type& j) {
+    return i.second < j.second;
+  }

-/* ************************************************************************* */
-template<class VALUES, class KEY>
-typename ExtendedKalmanFilter<VALUES, KEY>::T ExtendedKalmanFilter<VALUES, KEY>::solve_(const GaussianFactorGraph& linearFactorGraph,
-    const Ordering& ordering, const VALUES& linearizationPoints, const KEY& lastKey, JacobianFactor::shared_ptr& newPrior) const {
+  /* ************************************************************************* */
+  template<class VALUES, class KEY>
+  typename ExtendedKalmanFilter<VALUES, KEY>::T ExtendedKalmanFilter<VALUES, KEY>::solve_(
+      const GaussianFactorGraph& linearFactorGraph, const Ordering& ordering,
+      const VALUES& linearizationPoints, const KEY& lastKey,
+      JacobianFactor::shared_ptr& newPrior) const {

-  // Extract the Index of the provided last key
-  gtsam::Index lastIndex = ordering.at(lastKey);
+    // Extract the Index of the provided last key
+    gtsam::Index lastIndex = ordering.at(lastKey);

-  // Solve the linear factor graph, converting it into a linear Bayes Network ( P(x0,x1) = P(x0|x1)*P(x1) )
-  GaussianSequentialSolver solver(linearFactorGraph);
-  GaussianBayesNet::shared_ptr linearBayesNet = solver.eliminate();
+    // Solve the linear factor graph, converting it into a linear Bayes Network
+    // P(x0,x1) = P(x0|x1)*P(x1)
+    GaussianSequentialSolver solver(linearFactorGraph);
+    GaussianBayesNet::shared_ptr linearBayesNet = solver.eliminate();

-  // Extract the current estimate of x1,P1 from the Bayes Network
-  VectorValues result = optimize(*linearBayesNet);
-  T x = linearizationPoints[lastKey].expmap(result[lastIndex]);
+    // Extract the current estimate of x1,P1 from the Bayes Network
+    VectorValues result = optimize(*linearBayesNet);
+    T x = linearizationPoints[lastKey].expmap(result[lastIndex]);

-  // Create a Jacobian Factor from the root node of the produced Bayes Net. This will act as a prior for the next iteration.
-  // The linearization point of this prior must be moved to the new estimate of x, and the key/index needs to be reset to 0,
-  // the first key in the next iteration
-  const GaussianConditional::shared_ptr& cg = linearBayesNet->back();
-  assert(cg->nrFrontals() == 1);
-  assert(cg->nrParents() == 0);
-  // TODO: Find a way to create the correct Jacobian Factor in a single pass
-  JacobianFactor tmpPrior = JacobianFactor(*cg);
-  newPrior = JacobianFactor::shared_ptr(
-      new JacobianFactor(0, tmpPrior.getA(tmpPrior.begin()), tmpPrior.getb() - tmpPrior.getA(tmpPrior.begin()) * result[lastIndex], tmpPrior.get_model()));
+    // Create a Jacobian Factor from the root node of the produced Bayes Net.
+    // This will act as a prior for the next iteration.
+    // The linearization point of this prior must be moved to the new estimate of x,
+    // and the key/index needs to be reset to 0, the first key in the next iteration.
+    const GaussianConditional::shared_ptr& cg = linearBayesNet->back();
+    assert(cg->nrFrontals() == 1);
+    assert(cg->nrParents() == 0);
+    // TODO: Find a way to create the correct Jacobian Factor in a single pass
+    JacobianFactor tmpPrior = JacobianFactor(*cg);
+    newPrior = JacobianFactor::shared_ptr(
+        new JacobianFactor(
+            0,
+            tmpPrior.getA(tmpPrior.begin()),
+            tmpPrior.getb()
+                - tmpPrior.getA(tmpPrior.begin()) * result[lastIndex],
+            tmpPrior.get_model()));

-  return x;
-}
+    return x;
+  }

-/* ************************************************************************* */
-template<class VALUES, class KEY>
-ExtendedKalmanFilter<VALUES, KEY>::ExtendedKalmanFilter(T x_initial, noiseModel::Gaussian::shared_ptr P_initial) {
+  /* ************************************************************************* */
+  template<class VALUES, class KEY>
+  ExtendedKalmanFilter<VALUES, KEY>::ExtendedKalmanFilter(T x_initial,
+      noiseModel::Gaussian::shared_ptr P_initial) {

-  // Set the initial linearization point to the provided mean
-  x_ = x_initial;
+    // Set the initial linearization point to the provided mean
+    x_ = x_initial;

-  // Create a Jacobian Prior Factor directly P_initial. Since x0 is set to the provided mean, the b vector in the prior will be zero
-  priorFactor_ = JacobianFactor::shared_ptr(new JacobianFactor(0, P_initial->R(), Vector::Zero(x_initial.dim()), noiseModel::Unit::Create(P_initial->dim())));
-}
-;
+    // Create a Jacobian Prior Factor directly P_initial.
+    // Since x0 is set to the provided mean, the b vector in the prior will be zero
+    // TODO Frank asks: is there a reason why noiseModel is not simply P_initial ?
+    priorFactor_ = JacobianFactor::shared_ptr(
+        new JacobianFactor(0, P_initial->R(), Vector::Zero(x_initial.dim()),
+            noiseModel::Unit::Create(P_initial->dim())));
+  }

-/* ************************************************************************* */
-template<class VALUES, class KEY>
-typename ExtendedKalmanFilter<VALUES, KEY>::T ExtendedKalmanFilter<VALUES, KEY>::predict(const MotionFactor& motionFactor) {
+  /* ************************************************************************* */
+  template<class VALUES, class KEY>
+  typename ExtendedKalmanFilter<VALUES, KEY>::T ExtendedKalmanFilter<VALUES, KEY>::predict(
+      const MotionFactor& motionFactor) {

-  // TODO: This implementation largely ignores the actual factor symbols. Calling predict() then update() with drastically
-  // different keys will still compute as if a common key-set was used
+    // TODO: This implementation largely ignores the actual factor symbols.
+    // Calling predict() then update() with drastically
+    // different keys will still compute as if a common key-set was used

-  // Create Keys
-  KEY x0 = motionFactor.key1();
-  KEY x1 = motionFactor.key2();
+    // Create Keys
+    KEY x0 = motionFactor.key1();
+    KEY x1 = motionFactor.key2();

-  // Create an elimination ordering
-  Ordering ordering;
-  ordering.insert(x0, 0);
-  ordering.insert(x1, 1);
+    // Create an elimination ordering
+    Ordering ordering;
+    ordering.insert(x0, 0);
+    ordering.insert(x1, 1);

-  // Create a set of linearization points
-  VALUES linearizationPoints;
-  linearizationPoints.insert(x0, x_);
-  linearizationPoints.insert(x1, x_);
+    // Create a set of linearization points
+    VALUES linearizationPoints;
+    linearizationPoints.insert(x0, x_);
+    linearizationPoints.insert(x1, x_); // TODO should this really be x_ ?

-  // Create a Gaussian Factor Graph
-  GaussianFactorGraph linearFactorGraph;
+    // Create a Gaussian Factor Graph
+    GaussianFactorGraph linearFactorGraph;

-  // Add in the prior on the first state
-  linearFactorGraph.push_back(priorFactor_);
+    // Add in previous posterior as prior on the first state
+    linearFactorGraph.push_back(priorFactor_);

-  // Linearize motion model and add it to the Kalman Filter graph
-  linearFactorGraph.push_back(motionFactor.linearize(linearizationPoints, ordering));
+    // Linearize motion model and add it to the Kalman Filter graph
+    linearFactorGraph.push_back(
+        motionFactor.linearize(linearizationPoints, ordering));

-  // Solve the factor graph and update the current state estimate and the prior factor for the next iteration
-  x_ = solve_(linearFactorGraph, ordering, linearizationPoints, x1, priorFactor_);
+    // Solve the factor graph and update the current state estimate
+    // and the posterior for the next iteration.
+    x_ = solve_(linearFactorGraph, ordering, linearizationPoints, x1, priorFactor_);

-  return x_;
-}
+    return x_;
+  }

-/* ************************************************************************* */
-template<class VALUES, class KEY>
-typename ExtendedKalmanFilter<VALUES, KEY>::T ExtendedKalmanFilter<VALUES, KEY>::update(const MeasurementFactor& measurementFactor) {
+  /* ************************************************************************* */
+  template<class VALUES, class KEY>
+  typename ExtendedKalmanFilter<VALUES, KEY>::T ExtendedKalmanFilter<VALUES, KEY>::update(
+      const MeasurementFactor& measurementFactor) {

-  // TODO: This implementation largely ignores the actual factor symbols. Calling predict() then update() with drastically
-  // different keys will still compute as if a common key-set was used
+    // TODO: This implementation largely ignores the actual factor symbols.
+    // Calling predict() then update() with drastically
+    // different keys will still compute as if a common key-set was used

-  // Create Keys
-  KEY x0 = measurementFactor.key();
+    // Create Keys
+    KEY x0 = measurementFactor.key();

-  // Create an elimination ordering
-  Ordering ordering;
-  ordering.insert(x0, 0);
+    // Create an elimination ordering
+    Ordering ordering;
+    ordering.insert(x0, 0);

-  // Create a set of linearization points
-  VALUES linearizationPoints;
-  linearizationPoints.insert(x0, x_);
+    // Create a set of linearization points
+    VALUES linearizationPoints;
+    linearizationPoints.insert(x0, x_);

-  // Create a Gaussian Factor Graph
-  GaussianFactorGraph linearFactorGraph;
+    // Create a Gaussian Factor Graph
+    GaussianFactorGraph linearFactorGraph;

-  // Add in the prior on the first state
-  linearFactorGraph.push_back(priorFactor_);
+    // Add in the prior on the first state
+    linearFactorGraph.push_back(priorFactor_);

-  // Linearize measurement factor and add it to the Kalman Filter graph
-  linearFactorGraph.push_back(measurementFactor.linearize(linearizationPoints, ordering));
+    // Linearize measurement factor and add it to the Kalman Filter graph
+    linearFactorGraph.push_back(
+        measurementFactor.linearize(linearizationPoints, ordering));

-  // Solve the factor graph and update the current state estimate and the prior factor for the next iteration
-  x_ = solve_(linearFactorGraph, ordering, linearizationPoints, x0, priorFactor_);
+    // Solve the factor graph and update the current state estimate
+    // and the prior factor for the next iteration
+    x_ = solve_(linearFactorGraph, ordering, linearizationPoints, x0, priorFactor_);

-  return x_;
-}
+    return x_;
+  }

 } // namespace gtsam

diff --git a/gtsam/nonlinear/ExtendedKalmanFilter.h b/gtsam/nonlinear/ExtendedKalmanFilter.h
index 72cd87754..1de95ecf1 100644
--- a/gtsam/nonlinear/ExtendedKalmanFilter.h
+++ b/gtsam/nonlinear/ExtendedKalmanFilter.h
@@ -24,44 +24,44 @@ namespace gtsam {

-/**
- * This is a generic Extended Kalman Filter class implemented using nonlinear factors. GTSAM
- * basically does SRIF with LDL to solve the filter problem, making this an efficient, numerically
- * stable Kalman Filter implementation.
- *
- * The Kalman Filter relies on two models: a motion model that predicts the next state using
- * the current state, and a measurement model that predicts the measurement value at a given
- * state. Because these two models are situation-dependent, base classes for each have been
- * provided above, from which the user must derive a class and incorporate the actual modeling
- * equations. A pointer to an instance of each of these classes must be given to the Kalman
- * Filter at creation, allowing the Kalman Filter to create factors as needed.
- *
- * The Kalman Filter class provides a "predict" function and an "update" function to perform
- * these steps independently, as well as a "predictAndUpdate" that combines both steps for some
- * computational savings.
- */
+  /**
+   * This is a generic Extended Kalman Filter class implemented using nonlinear factors. GTSAM
+   * basically does SRIF with LDL to solve the filter problem, making this an efficient, numerically
+   * stable Kalman Filter implementation.
+   *
+   * The Kalman Filter relies on two models: a motion model that predicts the next state using
+   * the current state, and a measurement model that predicts the measurement value at a given
+   * state. Because these two models are situation-dependent, base classes for each have been
+   * provided above, from which the user must derive a class and incorporate the actual modeling
+   * equations.
+   *
+   * The class provides a "predict" and "update" function to perform these steps independently.
+   * TODO: a "predictAndUpdate" that combines both steps for some computational savings.
+   */

-template<class VALUES, class KEY>
-class ExtendedKalmanFilter {
-public:
+  template<class VALUES, class KEY>
+  class ExtendedKalmanFilter {
+  public:

-  typedef boost::shared_ptr<ExtendedKalmanFilter<VALUES, KEY> > shared_ptr;
-  typedef typename KEY::Value T;
-  typedef NonlinearFactor2<VALUES, KEY, KEY> MotionFactor;
-  typedef NonlinearFactor1<VALUES, KEY> MeasurementFactor;
+    typedef boost::shared_ptr<ExtendedKalmanFilter<VALUES, KEY> > shared_ptr;
+    typedef typename KEY::Value T;
+    typedef NonlinearFactor2<VALUES, KEY, KEY> MotionFactor;
+    typedef NonlinearFactor1<VALUES, KEY> MeasurementFactor;

-protected:
-  JacobianFactor::shared_ptr priorFactor_;
-  T x_;
+  protected:
+    T x_; // linearization point
+    JacobianFactor::shared_ptr priorFactor_; // density

-  T solve_(const GaussianFactorGraph& linearFactorGraph, const Ordering& ordering, const VALUES& linearizationPoints, const KEY& x,
-      JacobianFactor::shared_ptr& newPrior) const;
+    T solve_(const GaussianFactorGraph& linearFactorGraph,
+        const Ordering& ordering, const VALUES& linearizationPoints,
+        const KEY& x, JacobianFactor::shared_ptr& newPrior) const;

-public:
-  ExtendedKalmanFilter(T x_initial, noiseModel::Gaussian::shared_ptr P_initial);
+  public:
+    ExtendedKalmanFilter(T x_initial,
+        noiseModel::Gaussian::shared_ptr P_initial);

-  T predict(const MotionFactor& motionFactor);
-  T update(const MeasurementFactor& measurementFactor);
-};
+    T predict(const MotionFactor& motionFactor);
+    T update(const MeasurementFactor& measurementFactor);
+  };

 } // namespace
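
The rebased right-hand side "b - A * delta" built in solve_() deserves a word of explanation. Elimination returns a Gaussian on the correction delta about the old linearization point, ||A*delta - b||^2, but the next iteration linearizes about the updated estimate (the expmap of delta* at the old point). A correction delta' about the new point corresponds to delta = delta' + delta*, and substituting gives ||A*delta' - (b - A*delta*)||^2, which is exactly the JacobianFactor stored in newPrior. The short standalone snippet below checks that identity numerically; it uses plain Eigen rather than GTSAM types, and the sizes and random values are purely illustrative.

// Standalone check of the prior-rebasing identity used in solve_():
//   || A*(dprime + dstar) - b ||  ==  || A*dprime - (b - A*dstar) ||
// Plain Eigen, independent of GTSAM; sizes and values are illustrative only.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);    // plays the role of R from the conditional
  Eigen::VectorXd b = Eigen::VectorXd::Random(3);       // right-hand side of the conditional
  Eigen::VectorXd dstar = Eigen::VectorXd::Random(3);   // solved correction, i.e. result[lastIndex]
  Eigen::VectorXd dprime = Eigen::VectorXd::Random(3);  // a correction about the new linearization point

  double oldFrame = (A * (dprime + dstar) - b).norm();        // error expressed about the old point
  double newFrame = (A * dprime - (b - A * dstar)).norm();    // error with the rebased prior
  std::cout << "difference: " << oldFrame - newFrame << "\n"; // zero up to round-off
  return 0;
}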
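
The class comment describes the intended workflow: derive a motion factor and a measurement factor from the MotionFactor/MeasurementFactor typedefs the filter exposes, then alternate predict() and update(). The sketch below illustrates that workflow. It is not part of the patch: the key and values types (PointKey, PointValues), the base-class constructor argument order, and the evaluateError() signature are assumptions modeled on the NonlinearFactor1/NonlinearFactor2 conventions of GTSAM at the time, and all class and variable names are illustrative.

// Hypothetical usage sketch -- not part of the patch. Key/values typedefs,
// base-class constructor order, and the evaluateError() signature are
// assumptions; adapt them to the actual headers in your GTSAM version.
#include <gtsam/geometry/Point2.h>
#include <gtsam/nonlinear/ExtendedKalmanFilter.h>

using namespace gtsam;

typedef TypedSymbol<Point2, 'x'> PointKey;                    // assumed key type
typedef LieValues<PointKey> PointValues;                      // assumed values container
typedef ExtendedKalmanFilter<PointValues, PointKey> PointEKF;

// Motion model x1 = x0 + u, written as an error on the pair (x0, x1).
class MoveFactor: public PointEKF::MotionFactor {
  Point2 u_; // commanded displacement
public:
  MoveFactor(const PointKey& k0, const PointKey& k1, const Point2& u,
      const noiseModel::Gaussian::shared_ptr& model) :
      PointEKF::MotionFactor(model, k0, k1), u_(u) {
  }

  Vector evaluateError(const Point2& x0, const Point2& x1,
      boost::optional<Matrix&> H0 = boost::none,
      boost::optional<Matrix&> H1 = boost::none) const {
    if (H0) *H0 = -eye(2);
    if (H1) *H1 = eye(2);
    return Vector_(2, x1.x() - x0.x() - u_.x(), x1.y() - x0.y() - u_.y());
  }
};

// A direct position measurement would derive from PointEKF::MeasurementFactor
// in the same way, with a single key and a single Jacobian.

int main() {
  // Start the filter at the origin with an isotropic prior covariance.
  PointEKF ekf(Point2(0.0, 0.0), noiseModel::Isotropic::Sigma(2, 0.1));

  // One prediction step: move one unit in x with process noise Q.
  noiseModel::Gaussian::shared_ptr Q = noiseModel::Isotropic::Sigma(2, 0.05);
  MoveFactor motion(PointKey(0), PointKey(1), Point2(1.0, 0.0), Q);
  Point2 x_predicted = ekf.predict(motion);
  x_predicted.print("x1 ");

  // ekf.update(...) would follow with a MeasurementFactor-derived factor.
  return 0;
}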