Comments and formatting, and much cleaner solve

release/4.3a0
Frank Dellaert 2013-12-10 05:23:28 +00:00
parent abbbd02979
commit f2da19e4de
2 changed files with 229 additions and 219 deletions

gtsam/linear/KalmanFilter.cpp

@@ -21,7 +21,7 @@
  */
 #include <gtsam/linear/KalmanFilter.h>
-#include <gtsam/linear/GaussianFactorGraph.h>
+#include <gtsam/linear/GaussianBayesNet.h>
 #include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/linear/HessianFactor.h>
 #include <gtsam/base/Testable.h>
@@ -34,134 +34,145 @@ using namespace std;
 
 namespace gtsam {
 
-/// Auxiliary function to solve factor graph and return pointer to root conditional
-KalmanFilter::State solve(const GaussianFactorGraph& factorGraph, const GaussianFactorGraph::Eliminate& function)
-{
-  // Do a dense solve, e.g. don't use the multifrontal framework
-  // Eliminate the keys in increasing order so that the last key is the one we want to keep.
-  FastSet<Key> keysToEliminate = factorGraph.keys();
-  FastSet<Key>::const_iterator lastKeyPos = keysToEliminate.end();
-  -- lastKeyPos;
-  Key remainingKey = *lastKeyPos;
-  keysToEliminate.erase(lastKeyPos);
-  GaussianFactorGraph::EliminationResult result = function(factorGraph, Ordering(keysToEliminate));
-  // As this is a filter, all we need is the posterior P(x_t). Eliminate it to be
-  // upper-triangular.
-  GaussianFactorGraph graphOfRemainingFactor;
-  graphOfRemainingFactor += result.second;
-  GaussianDensity::shared_ptr state = boost::make_shared<GaussianDensity>(
-      *function(graphOfRemainingFactor, Ordering(cref_list_of<1>(remainingKey))).first);
-  return state;
-}
+// Auxiliary function to solve factor graph and return pointer to root conditional
+static KalmanFilter::State solve(const GaussianFactorGraph& factorGraph,
+    const GaussianFactorGraph::Eliminate& function) {
+
+  // We will eliminate the keys in increasing order, except the last key
+  Ordering ordering1(factorGraph.keys());
+  Key lastKey = ordering1.back();
+  ordering1.pop_back();
+
+  // Eliminate the graph using the provided Eliminate function
+  GaussianConditional::shared_ptr conditional;
+  GaussianFactor::shared_ptr factor;
+  boost::tie(conditional, factor) = function(factorGraph, ordering1);
+
+  // As this is a filter, all we need is the posterior P(x_t).
+  GaussianFactorGraph graphOfRemainingFactor;
+  graphOfRemainingFactor += factor;
+
+  // Eliminate it to be upper-triangular.
+  Ordering ordering2;
+  ordering2 += lastKey;
+  boost::tie(conditional, factor) = function(graphOfRemainingFactor, ordering2);
+  return boost::make_shared<GaussianDensity>(*conditional);
+}
 
 /* ************************************************************************* */
-KalmanFilter::State fuse(const KalmanFilter::State& p, GaussianFactor::shared_ptr newFactor,
-    const GaussianFactorGraph::Eliminate& function)
-{
+static KalmanFilter::State fuse(const KalmanFilter::State& p,
+    GaussianFactor::shared_ptr newFactor,
+    const GaussianFactorGraph::Eliminate& function) {
   // Create a factor graph
   GaussianFactorGraph factorGraph;
   factorGraph += p, newFactor;
   // Eliminate graph in order x0, x1, to get Bayes net P(x0|x1)P(x1)
   return solve(factorGraph, function);
 }
 
 /* ************************************************************************* */
 GaussianFactorGraph::Eliminate KalmanFilter::factorization() const {
-  return method_ == QR ?
-      GaussianFactorGraph::Eliminate(EliminateQR) :
-      GaussianFactorGraph::Eliminate(EliminateCholesky);
+  return
+      method_ == QR ? GaussianFactorGraph::Eliminate(EliminateQR) :
+          GaussianFactorGraph::Eliminate(EliminateCholesky);
 }
 
 /* ************************************************************************* */
-KalmanFilter::State KalmanFilter::init(const Vector& x0, const SharedDiagonal& P0) {
+KalmanFilter::State KalmanFilter::init(const Vector& x0,
+    const SharedDiagonal& P0) {
 
   // Create a factor graph f(x0), eliminate it into P(x0)
   GaussianFactorGraph factorGraph;
   factorGraph += JacobianFactor(0, I_, x0, P0); // |x-x0|^2_diagSigma
   return solve(factorGraph, factorization());
 }
 
 /* ************************************************************************* */
 KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) {
 
   // Create a factor graph f(x0), eliminate it into P(x0)
   GaussianFactorGraph factorGraph;
   factorGraph += HessianFactor(0, x, P0); // 0.5*(x-x0)'*inv(Sigma)*(x-x0)
   return solve(factorGraph, factorization());
 }
 
 /* ************************************************************************* */
 void KalmanFilter::print(const string& s) const {
   cout << "KalmanFilter " << s << ", dim = " << n_ << endl;
 }
 
 /* ************************************************************************* */
 KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F,
     const Matrix& B, const Vector& u, const SharedDiagonal& model) {
   // The factor related to the motion model is defined as
   // f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
   Key k = step(p);
-  return fuse(p, boost::make_shared<JacobianFactor>(k, -F, k + 1, I_, B * u, model), factorization());
+  return fuse(p,
+      boost::make_shared<JacobianFactor>(k, -F, k + 1, I_, B * u, model),
+      factorization());
 }
 
 /* ************************************************************************* */
 KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F,
     const Matrix& B, const Vector& u, const Matrix& Q) {
 
 #ifndef NDEBUG
   DenseIndex n = F.cols();
   assert(F.rows() == n);
   assert(B.rows() == n);
   assert(B.cols() == u.size());
   assert(Q.rows() == n);
   assert(Q.cols() == n);
 #endif
 
   // The factor related to the motion model is defined as
   // f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
   // See documentation in HessianFactor, we have A1 = -F, A2 = I_, b = B*u:
   // TODO: starts to seem more elaborate than straight-up KF equations?
   Matrix M = inverse(Q), Ft = trans(F);
   Matrix G12 = -Ft * M, G11 = -G12 * F, G22 = M;
   Vector b = B * u, g2 = M * b, g1 = -Ft * g2;
   double f = dot(b, g2);
   Key k = step(p);
-  return fuse(p, boost::make_shared<HessianFactor>(k, k + 1, G11, G12, g1, G22, g2, f), factorization());
+  return fuse(p,
+      boost::make_shared<HessianFactor>(k, k + 1, G11, G12, g1, G22, g2, f),
+      factorization());
 }
 
 /* ************************************************************************* */
 KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0,
     const Matrix& A1, const Vector& b, const SharedDiagonal& model) {
   // The factor related to the motion model is defined as
   // f2(x_{t},x_{t+1}) = |A0*x_{t} + A1*x_{t+1} - b|^2
   Key k = step(p);
-  return fuse(p, boost::make_shared<JacobianFactor>(k, A0, k + 1, A1, b, model), factorization());
+  return fuse(p, boost::make_shared<JacobianFactor>(k, A0, k + 1, A1, b, model),
+      factorization());
 }
 
 /* ************************************************************************* */
 KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H,
     const Vector& z, const SharedDiagonal& model) {
   // The factor related to the measurements would be defined as
   // f2 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T
   //    = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T
   Key k = step(p);
-  return fuse(p, boost::make_shared<JacobianFactor>(k, H, z, model), factorization());
+  return fuse(p, boost::make_shared<JacobianFactor>(k, H, z, model),
+      factorization());
 }
 
 /* ************************************************************************* */
-KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H, const Vector& z,
-    const Matrix& Q) {
+KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H,
    const Vector& z, const Matrix& Q) {
   Key k = step(p);
   Matrix M = inverse(Q), Ht = trans(H);
   Matrix G = Ht * M * H;
   Vector g = Ht * M * z;
   double f = dot(z, M * z);
   return fuse(p, boost::make_shared<HessianFactor>(k, G, g, f), factorization());
 }
 
 /* ************************************************************************* */
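For reference (not part of the commit), the Hessian blocks assembled in predictQ follow from expanding the motion-model error quoted in the comments. Writing that error as 0.5*(A1*x_t + A2*x_{t+1} - b)^T Q^-1 (A1*x_t + A2*x_{t+1} - b) with A1 = -F, A2 = I and b = B*u, and assuming HessianFactor stores the quadratic as 0.5 x'Gx - x'g + 0.5 f (its usual convention), a sketch of the expansion is:

\[
\tfrac12 (A_1 x_t + A_2 x_{t+1} - b)^{\top} Q^{-1} (A_1 x_t + A_2 x_{t+1} - b)
= \tfrac12 \begin{pmatrix} x_t \\ x_{t+1} \end{pmatrix}^{\top}
  \begin{pmatrix} G_{11} & G_{12} \\ G_{12}^{\top} & G_{22} \end{pmatrix}
  \begin{pmatrix} x_t \\ x_{t+1} \end{pmatrix}
- \begin{pmatrix} x_t \\ x_{t+1} \end{pmatrix}^{\top}
  \begin{pmatrix} g_1 \\ g_2 \end{pmatrix}
+ \tfrac12 f,
\]
\[
G_{11} = F^{\top} Q^{-1} F, \quad
G_{12} = -F^{\top} Q^{-1}, \quad
G_{22} = Q^{-1}, \quad
g_1 = -F^{\top} Q^{-1} B u, \quad
g_2 = Q^{-1} B u, \quad
f = (Bu)^{\top} Q^{-1} (Bu).
\]

With M = Q^-1 this is exactly what the code computes: G12 = -Ft*M, G11 = -G12*F, G22 = M, g2 = M*b, g1 = -Ft*g2, f = dot(b, g2). The single-block analogue with H and right-hand side z gives the G, g, f used in updateQ.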

gtsam/linear/KalmanFilter.h

@@ -29,120 +29,119 @@
 
 namespace gtsam {
 
 /**
  * Kalman Filter class
  *
  * Knows how to maintain a Gaussian density under linear-Gaussian motion and
  * measurement models. It uses the square-root information form, as usual in GTSAM.
  *
  * The filter is functional, in that it does not have state: you call init() to create
- * an initial state, then predict() and update() that create new states out of old.
+ * an initial state, then predict() and update() that create new states out of an old state.
  */
 class GTSAM_EXPORT KalmanFilter {
 
 public:
 
   /**
    * This Kalman filter is a Square-root Information filter
    * The type below allows you to specify the factorization variant.
    */
   enum Factorization {
     QR, CHOLESKY
   };
 
   /**
    * The Kalman filter state is simply a GaussianDensity
    */
   typedef GaussianDensity::shared_ptr State;
 
 private:
 
   const size_t n_; /** dimensionality of state */
   const Matrix I_; /** identity matrix of size n*n */
   const Factorization method_; /** algorithm */
 
   GaussianFactorGraph::Eliminate factorization() const;
 
 public:
 
-  // private constructor
+  // Constructor
   KalmanFilter(size_t n, Factorization method =
       KALMANFILTER_DEFAULT_FACTORIZATION) :
       n_(n), I_(eye(n_, n_)), method_(method) {
   }
 
   /**
    * Create initial state, i.e., prior density at time k=0
-   * In Kalman Filter notation, this are is x_{0|0} and P_{0|0}
+   * In Kalman Filter notation, these are x_{0|0} and P_{0|0}
    * @param x0 estimate at time 0
    * @param P0 covariance at time 0, given as a diagonal Gaussian 'model'
    */
   State init(const Vector& x0, const SharedDiagonal& P0);
 
   /// version of init with a full covariance matrix
   State init(const Vector& x0, const Matrix& P0);
 
   /// print
   void print(const std::string& s = "") const;
 
   /** Return step index k, starts at 0, incremented at each predict. */
   static Key step(const State& p) {
     return p->firstFrontalKey();
   }
 
   /**
    * Predict the state P(x_{t+1}|Z^t)
    * In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
-   * After the call, that is the density that can be queried.
    * Details and parameters:
    * In a linear Kalman Filter, the motion model is f(x_{t}) = F*x_{t} + B*u_{t} + w
    * where F is the state transition model/matrix, B is the control input model,
    * and w is zero-mean, Gaussian white noise with covariance Q.
    */
   State predict(const State& p, const Matrix& F, const Matrix& B,
       const Vector& u, const SharedDiagonal& modelQ);
 
   /*
    * Version of predict with full covariance
    * Q is normally derived as G*w*G^T where w models uncertainty of some
    * physical property, such as velocity or acceleration, and G is derived from physics.
    * This version allows more realistic models than a diagonal covariance matrix.
    */
   State predictQ(const State& p, const Matrix& F, const Matrix& B,
       const Vector& u, const Matrix& Q);
 
   /**
    * Predict the state P(x_{t+1}|Z^t)
    * In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
    * After the call, that is the density that can be queried.
    * Details and parameters:
    * This version of predict takes GaussianFactor motion model [A0 A1 b]
    * with an optional noise model.
    */
   State predict2(const State& p, const Matrix& A0, const Matrix& A1,
       const Vector& b, const SharedDiagonal& model);
 
   /**
    * Update Kalman filter with a measurement
    * For the Kalman Filter, the measurement function, h(x_{t}) = z_{t}
    * will be of the form h(x_{t}) = H*x_{t} + v
    * where H is the observation model/matrix, and v is zero-mean,
    * Gaussian white noise with covariance R.
-   * Currently, R is restricted to diagonal Gaussians (model parameter)
+   * In this version, R is restricted to diagonal Gaussians (model parameter)
    */
   State update(const State& p, const Matrix& H, const Vector& z,
       const SharedDiagonal& model);
 
   /*
    * Version of update with full covariance
    * Q is normally derived as G*w*G^T where w models uncertainty of some
    * physical property, such as velocity or acceleration, and G is derived from physics.
    * This version allows more realistic models than a diagonal covariance matrix.
    */
   State updateQ(const State& p, const Matrix& H, const Vector& z,
       const Matrix& Q);
 };
 
 } // \namespace gtsam
 
 /* ************************************************************************* */
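As a usage note (not part of this commit), the functional API declared above chains init, predict, and update, each call returning a new State rather than mutating the filter. The following is a minimal sketch assuming the standard GTSAM linear headers and the Eigen-based Matrix/Vector typedefs; the dimensions and sigmas are illustrative only (compare the Point2 Kalman filter examples shipped with GTSAM).

// Minimal sketch: track a 2-D point with identity dynamics and a direct
// position measurement. All numeric values below are illustrative.
#include <gtsam/linear/KalmanFilter.h>
#include <gtsam/linear/NoiseModel.h>

using namespace gtsam;

int main() {
  KalmanFilter kf(2); // state dimension n = 2, default factorization

  // Prior x_{0|0}, P_{0|0}
  Vector x0 = Vector::Zero(2);
  SharedDiagonal P0 = noiseModel::Isotropic::Sigma(2, 0.1);
  KalmanFilter::State p = kf.init(x0, P0);

  // Predict with motion model x_{t+1} = F*x_t + B*u + w, w ~ N(0, Q)
  Matrix F = Matrix::Identity(2, 2);
  Matrix B = Matrix::Identity(2, 2);
  Vector u = Vector::Zero(2);
  SharedDiagonal modelQ = noiseModel::Isotropic::Sigma(2, 0.1);
  p = kf.predict(p, F, B, u, modelQ); // now holds x_{1|0}, P_{1|0}

  // Update with measurement z = H*x + v, v ~ N(0, R)
  Matrix H = Matrix::Identity(2, 2);
  Vector z = Vector::Zero(2);
  SharedDiagonal modelR = noiseModel::Isotropic::Sigma(2, 0.25);
  p = kf.update(p, H, z, modelR); // now holds x_{1|1}, P_{1|1}

  // The State is just a GaussianDensity::shared_ptr
  p->print("posterior ");
  return 0;
}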