Comments and formatting, and much cleaner solve

release/4.3a0
Frank Dellaert 2013-12-10 05:23:28 +00:00
parent abbbd02979
commit f2da19e4de
2 changed files with 229 additions and 219 deletions

gtsam/linear/KalmanFilter.cpp

@@ -21,7 +21,7 @@
*/
#include <gtsam/linear/KalmanFilter.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/base/Testable.h>
@@ -34,134 +34,145 @@ using namespace std;
namespace gtsam {
/// Auxiliary function to solve factor graph and return pointer to root conditional
KalmanFilter::State solve(const GaussianFactorGraph& factorGraph, const GaussianFactorGraph::Eliminate& function)
{
// Do a dense solve, e.g. don't use the multifrontal framework
// Eliminate the keys in increasing order so that the last key is the one we want to keep.
FastSet<Key> keysToEliminate = factorGraph.keys();
FastSet<Key>::const_iterator lastKeyPos = keysToEliminate.end();
-- lastKeyPos;
Key remainingKey = *lastKeyPos;
keysToEliminate.erase(lastKeyPos);
GaussianFactorGraph::EliminationResult result = function(factorGraph, Ordering(keysToEliminate));
// As this is a filter, all we need is the posterior P(x_t). Eliminate it to be
// upper-triangular.
GaussianFactorGraph graphOfRemainingFactor;
graphOfRemainingFactor += result.second;
GaussianDensity::shared_ptr state = boost::make_shared<GaussianDensity>(
*function(graphOfRemainingFactor, Ordering(cref_list_of<1>(remainingKey))).first);
return state;
}
// Auxiliary function to solve factor graph and return pointer to root conditional
static KalmanFilter::State solve(const GaussianFactorGraph& factorGraph,
const GaussianFactorGraph::Eliminate& function) {
// We will eliminate the keys in increasing order, except the last key
Ordering ordering1(factorGraph.keys());
Key lastKey = ordering1.back();
ordering1.pop_back();
// Eliminate the graph using the provided Eliminate function
GaussianConditional::shared_ptr conditional;
GaussianFactor::shared_ptr factor;
boost::tie(conditional, factor) = function(factorGraph, ordering1);
// As this is a filter, all we need is the posterior P(x_t).
GaussianFactorGraph graphOfRemainingFactor;
graphOfRemainingFactor += factor;
// Eliminate it to be upper-triangular.
Ordering ordering2;
ordering2 += lastKey;
boost::tie(conditional, factor) = function(graphOfRemainingFactor, ordering2);
return boost::make_shared<GaussianDensity>(*conditional);
}
/* ************************************************************************* */
KalmanFilter::State fuse(const KalmanFilter::State& p, GaussianFactor::shared_ptr newFactor,
const GaussianFactorGraph::Eliminate& function)
{
// Create a factor graph
GaussianFactorGraph factorGraph;
factorGraph += p, newFactor;
// Eliminate graph in order x0, x1, to get Bayes net P(x0|x1)P(x1)
return solve(factorGraph, function);
}
/* ************************************************************************* */
static KalmanFilter::State fuse(const KalmanFilter::State& p,
GaussianFactor::shared_ptr newFactor,
const GaussianFactorGraph::Eliminate& function) {
// Create a factor graph
GaussianFactorGraph factorGraph;
factorGraph += p, newFactor;
// Eliminate graph in order x0, x1, to get Bayes net P(x0|x1)P(x1)
return solve(factorGraph, function);
}
/* ************************************************************************* */
GaussianFactorGraph::Eliminate KalmanFilter::factorization() const {
return method_ == QR ?
GaussianFactorGraph::Eliminate(EliminateQR) :
GaussianFactorGraph::Eliminate(EliminateCholesky);
}
/* ************************************************************************* */
GaussianFactorGraph::Eliminate KalmanFilter::factorization() const {
return
method_ == QR ? GaussianFactorGraph::Eliminate(EliminateQR) :
GaussianFactorGraph::Eliminate(EliminateCholesky);
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::init(const Vector& x0, const SharedDiagonal& P0) {
// Create a factor graph f(x0), eliminate it into P(x0)
GaussianFactorGraph factorGraph;
factorGraph += JacobianFactor(0, I_, x0, P0); // |x-x0|^2_diagSigma
return solve(factorGraph, factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::init(const Vector& x0,
const SharedDiagonal& P0) {
// Create a factor graph f(x0), eliminate it into P(x0)
GaussianFactorGraph factorGraph;
factorGraph += JacobianFactor(0, I_, x0, P0); // |x-x0|^2_diagSigma
return solve(factorGraph, factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) {
// Create a factor graph f(x0), eliminate it into P(x0)
GaussianFactorGraph factorGraph;
factorGraph += HessianFactor(0, x, P0); // 0.5*(x-x0)'*inv(Sigma)*(x-x0)
return solve(factorGraph, factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::init(const Vector& x, const Matrix& P0) {
// Create a factor graph f(x0), eliminate it into P(x0)
GaussianFactorGraph factorGraph;
factorGraph += HessianFactor(0, x, P0); // 0.5*(x-x0)'*inv(Sigma)*(x-x0)
return solve(factorGraph, factorization());
}
/* ************************************************************************* */
void KalmanFilter::print(const string& s) const {
cout << "KalmanFilter " << s << ", dim = " << n_ << endl;
}
/* ************************************************************************* */
void KalmanFilter::print(const string& s) const {
cout << "KalmanFilter " << s << ", dim = " << n_ << endl;
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F,
const Matrix& B, const Vector& u, const SharedDiagonal& model) {
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
Key k = step(p);
return fuse(p, boost::make_shared<JacobianFactor>(k, -F, k + 1, I_, B * u, model), factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predict(const State& p, const Matrix& F,
const Matrix& B, const Vector& u, const SharedDiagonal& model) {
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
Key k = step(p);
return fuse(p,
boost::make_shared<JacobianFactor>(k, -F, k + 1, I_, B * u, model),
factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F,
const Matrix& B, const Vector& u, const Matrix& Q) {
#ifndef NDEBUG
DenseIndex n = F.cols();
assert(F.rows() == n);
assert(B.rows() == n);
assert(B.cols() == u.size());
assert(Q.rows() == n);
assert(Q.cols() == n);
#endif
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
// See documentation in HessianFactor, we have A1 = -F, A2 = I_, b = B*u:
// TODO: starts to seem more elaborate than straight-up KF equations?
Matrix M = inverse(Q), Ft = trans(F);
Matrix G12 = -Ft * M, G11 = -G12 * F, G22 = M;
Vector b = B * u, g2 = M * b, g1 = -Ft * g2;
double f = dot(b, g2);
Key k = step(p);
return fuse(p, boost::make_shared<HessianFactor>(k, k + 1, G11, G12, g1, G22, g2, f), factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predictQ(const State& p, const Matrix& F,
const Matrix& B, const Vector& u, const Matrix& Q) {
#ifndef NDEBUG
DenseIndex n = F.cols();
assert(F.rows() == n);
assert(B.rows() == n);
assert(B.cols() == u.size());
assert(Q.rows() == n);
assert(Q.cols() == n);
#endif
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = (F*x_{t} + B*u - x_{t+1}) * Q^-1 * (F*x_{t} + B*u - x_{t+1})^T
// See documentation in HessianFactor, we have A1 = -F, A2 = I_, b = B*u:
// TODO: starts to seem more elaborate than straight-up KF equations?
Matrix M = inverse(Q), Ft = trans(F);
Matrix G12 = -Ft * M, G11 = -G12 * F, G22 = M;
Vector b = B * u, g2 = M * b, g1 = -Ft * g2;
double f = dot(b, g2);
Key k = step(p);
return fuse(p,
boost::make_shared<HessianFactor>(k, k + 1, G11, G12, g1, G22, g2, f),
factorization());
}
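An aside on the block bookkeeping in predictQ, not part of the commit: the terms G11, G12, G22, g1, g2 and f are exactly the expansion of the quadratic 0.5*|x_{t+1} - F*x_t - B*u|^2_Q into the form 0.5*x'G*x - g'x + 0.5*f that a HessianFactor stores. A self-contained Eigen sketch with made-up values that checks the algebra numerically:

#include <Eigen/Dense>
#include <cassert>
#include <cmath>

int main() {
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  // Toy 2-D motion model: x2 = F*x1 + B*u + w, w ~ N(0, Q)
  MatrixXd F(2, 2), B(2, 1), Q(2, 2);
  F << 1, 0.1, 0, 1;
  B << 0.005, 0.1;
  Q << 0.01, 0.002, 0.002, 0.02;
  VectorXd u(1), x1(2), x2(2);
  u << 0.5;
  x1 << 1.0, 2.0;
  x2 << 1.3, 2.1;

  // Blocks exactly as computed in KalmanFilter::predictQ
  MatrixXd M = Q.inverse(), Ft = F.transpose();
  MatrixXd G12 = -Ft * M, G11 = -G12 * F, G22 = M;
  VectorXd b = B * u, g2 = M * b, g1 = -Ft * g2;
  double f = b.dot(g2);

  // Direct evaluation of 0.5*|x2 - F*x1 - b|^2_Q
  VectorXd e = x2 - F * x1 - b;
  double direct = 0.5 * e.dot(M * e);

  // Quadratic form 0.5*x'G*x - g'x + 0.5*f with x = [x1; x2]
  double viaBlocks =
      0.5 * (x1.dot(G11 * x1) + 2 * x1.dot(G12 * x2) + x2.dot(G22 * x2))
      - (g1.dot(x1) + g2.dot(x2)) + 0.5 * f;

  assert(std::abs(direct - viaBlocks) < 1e-9);
  return 0;
}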
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0,
const Matrix& A1, const Vector& b, const SharedDiagonal& model) {
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = |A0*x_{t} + A1*x_{t+1} - b|^2
Key k = step(p);
return fuse(p, boost::make_shared<JacobianFactor>(k, A0, k + 1, A1, b, model), factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::predict2(const State& p, const Matrix& A0,
const Matrix& A1, const Vector& b, const SharedDiagonal& model) {
// The factor related to the motion model is defined as
// f2(x_{t},x_{t+1}) = |A0*x_{t} + A1*x_{t+1} - b|^2
Key k = step(p);
return fuse(p, boost::make_shared<JacobianFactor>(k, A0, k + 1, A1, b, model),
factorization());
}
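Since predict builds JacobianFactor(k, -F, k + 1, I_, B*u, model), the general predict2 reproduces it with A0 = -F, A1 = I and b = B*u. A minimal illustrative sketch, with hypothetical values and using only the public API declared in KalmanFilter.h (not part of the commit):

#include <gtsam/linear/KalmanFilter.h>
#include <gtsam/linear/NoiseModel.h>

using namespace gtsam;

void predictTwoWays() {
  KalmanFilter kf(2);
  // Prior N(0, 0.1^2 I)
  KalmanFilter::State p =
      kf.init(Vector::Zero(2), noiseModel::Isotropic::Sigma(2, 0.1));

  Matrix F = Matrix::Identity(2, 2);
  Matrix B = Matrix::Identity(2, 2);
  Vector u = Vector::Ones(2);
  SharedDiagonal modelQ = noiseModel::Isotropic::Sigma(2, 0.2);

  // Standard form of the motion model ...
  KalmanFilter::State p1 = kf.predict(p, F, B, u, modelQ);

  // ... and the same model written as [A0 A1 b]: -F*x_t + I*x_{t+1} = B*u + w
  KalmanFilter::State p2 =
      kf.predict2(p, -F, Matrix::Identity(2, 2), B * u, modelQ);

  // p1 and p2 describe the same predicted Gaussian density over x_1
  p1->print("p1");
  p2->print("p2");
}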
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H,
const Vector& z, const SharedDiagonal& model) {
// The factor related to the measurements would be defined as
// f2 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T
// = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T
Key k = step(p);
return fuse(p, boost::make_shared<JacobianFactor>(k, H, z, model), factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::update(const State& p, const Matrix& H,
const Vector& z, const SharedDiagonal& model) {
// The factor related to the measurements would be defined as
// f2 = (h(x_{t}) - z_{t}) * R^-1 * (h(x_{t}) - z_{t})^T
// = (x_{t} - z_{t}) * R^-1 * (x_{t} - z_{t})^T
Key k = step(p);
return fuse(p, boost::make_shared<JacobianFactor>(k, H, z, model),
factorization());
}
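Conceptually, folding the measurement factor |H*x - z|^2_R into the graph and re-eliminating is the information-form update: the factor contributes H'R^{-1}H to the information matrix and H'R^{-1}z to the information vector. The filter itself keeps an upper-triangular square-root form rather than these dense matrices, but the underlying algebra is the same. A small Eigen-only sketch with illustrative numbers (not part of the commit) checking that this matches the textbook gain-form update:

#include <Eigen/Dense>
#include <cassert>
#include <iostream>

int main() {
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  // Prior N(x0, P), measurement z = H*x + v, v ~ N(0, R)
  VectorXd x0(2); x0 << 1.0, 2.0;
  MatrixXd P(2, 2); P << 0.04, 0.01, 0.01, 0.09;
  MatrixXd H(1, 2); H << 1.0, 0.0;
  MatrixXd R(1, 1); R << 0.25;
  VectorXd z(1); z << 1.2;

  // Gain (covariance) form
  MatrixXd S = H * P * H.transpose() + R;
  MatrixXd K = P * H.transpose() * S.inverse();
  VectorXd xGain = x0 + K * (z - H * x0);
  MatrixXd PGain = (MatrixXd::Identity(2, 2) - K * H) * P;

  // Information form: the update the factor-graph elimination performs
  MatrixXd Lambda = P.inverse() + H.transpose() * R.inverse() * H;
  VectorXd eta = P.inverse() * x0 + H.transpose() * R.inverse() * z;
  VectorXd xInfo = Lambda.inverse() * eta;
  MatrixXd PInfo = Lambda.inverse();

  assert((xGain - xInfo).norm() < 1e-9);
  assert((PGain - PInfo).norm() < 1e-9);
  std::cout << "posterior mean:\n" << xInfo << "\n";
  return 0;
}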
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H, const Vector& z,
const Matrix& Q) {
Key k = step(p);
Matrix M = inverse(Q), Ht = trans(H);
Matrix G = Ht * M * H;
Vector g = Ht * M * z;
double f = dot(z, M * z);
return fuse(p, boost::make_shared<HessianFactor>(k, G, g, f), factorization());
}
/* ************************************************************************* */
KalmanFilter::State KalmanFilter::updateQ(const State& p, const Matrix& H,
const Vector& z, const Matrix& Q) {
Key k = step(p);
Matrix M = inverse(Q), Ht = trans(H);
Matrix G = Ht * M * H;
Vector g = Ht * M * z;
double f = dot(z, M * z);
return fuse(p, boost::make_shared<HessianFactor>(k, G, g, f), factorization());
}
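Same bookkeeping as in predictQ, but with a single block: G = H'Q^{-1}H, g = H'Q^{-1}z and f = z'Q^{-1}z are the expansion of 0.5*|H*x - z|^2_Q. A quick Eigen check with made-up numbers, illustrative only:

#include <Eigen/Dense>
#include <cassert>
#include <cmath>

int main() {
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  MatrixXd H(1, 2), Q(1, 1);
  H << 1.0, 0.5;
  Q << 0.25;
  VectorXd z(1), x(2);
  z << 0.7;
  x << 1.0, -2.0;

  // Terms exactly as computed in KalmanFilter::updateQ
  MatrixXd M = Q.inverse(), Ht = H.transpose();
  MatrixXd G = Ht * M * H;
  VectorXd g = Ht * M * z;
  double f = z.dot(M * z);

  // 0.5*x'G*x - g'x + 0.5*f  ==  0.5*|H*x - z|^2_Q
  double viaBlocks = 0.5 * x.dot(G * x) - g.dot(x) + 0.5 * f;
  VectorXd e = H * x - z;
  double direct = 0.5 * e.dot(M * e);
  assert(std::abs(viaBlocks - direct) < 1e-12);
  return 0;
}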
/* ************************************************************************* */
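A note on the reworked solve above: eliminating every key except the last one leaves a single remaining factor on x_t whose information is the Schur complement of the eliminated block, i.e. the marginal of the filter state; the second, one-key elimination merely puts it in upper-triangular density form. A small Eigen-only sketch of that identity, with illustrative numbers and not part of the commit:

#include <Eigen/Dense>
#include <cassert>
#include <iostream>

int main() {
  using Eigen::MatrixXd;
  using Eigen::VectorXd;

  // Joint Gaussian over (x0, x1) in information form: Lambda * x = eta
  MatrixXd Lambda(4, 4);
  Lambda << 4, 1, -1, 0,
            1, 3,  0, -1,
           -1, 0,  2,  0,
            0, -1, 0,  2;
  VectorXd eta(4);
  eta << 1, 0, 0.5, -0.5;

  // Eliminating x0 (first two entries) leaves the marginal on x1:
  // the Schur complement of the x0 block.
  MatrixXd L00 = Lambda.topLeftCorner(2, 2), L01 = Lambda.topRightCorner(2, 2);
  MatrixXd L10 = Lambda.bottomLeftCorner(2, 2), L11 = Lambda.bottomRightCorner(2, 2);
  VectorXd e0 = eta.head(2), e1 = eta.tail(2);
  MatrixXd LambdaMarg = L11 - L10 * L00.inverse() * L01;
  VectorXd etaMarg = e1 - L10 * L00.inverse() * e0;

  // Check against marginalizing in covariance form
  MatrixXd Sigma = Lambda.inverse();
  VectorXd mu = Sigma * eta;
  assert((LambdaMarg.inverse() - Sigma.bottomRightCorner(2, 2)).norm() < 1e-9);
  assert((LambdaMarg.inverse() * etaMarg - mu.tail(2)).norm() < 1e-9);
  std::cout << "marginal information matrix:\n" << LambdaMarg << std::endl;
  return 0;
}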

gtsam/linear/KalmanFilter.h

@@ -29,120 +29,119 @@
namespace gtsam {
/**
* Kalman Filter class
*
* Knows how to maintain a Gaussian density under linear-Gaussian motion and
* measurement models. It uses the square-root information form, as usual in GTSAM.
*
* The filter is functional, in that it does not have state: you call init() to create
* an initial state, then predict() and update() that create new states out of an old state.
*/
class GTSAM_EXPORT KalmanFilter {
public:
/**
* Kalman Filter class
*
* Knows how to maintain a Gaussian density under linear-Gaussian motion and
* measurement models. It uses the square-root information form, as usual in GTSAM.
*
* The filter is functional, in that it does not have state: you call init() to create
* an initial state, then predict() and update() that create new states out of old.
* This Kalman filter is a Square-root Information filter
* The type below allows you to specify the factorization variant.
*/
class GTSAM_EXPORT KalmanFilter {
public:
/**
* This Kalman filter is a Square-root Information filter
* The type below allows you to specify the factorization variant.
*/
enum Factorization {
QR, CHOLESKY
};
/**
* The Kalman filter state is simply a GaussianDensity
*/
typedef GaussianDensity::shared_ptr State;
private:
const size_t n_; /** dimensionality of state */
const Matrix I_; /** identity matrix of size n*n */
const Factorization method_; /** algorithm */
GaussianFactorGraph::Eliminate factorization() const;
public:
// private constructor
KalmanFilter(size_t n, Factorization method =
KALMANFILTER_DEFAULT_FACTORIZATION) :
n_(n), I_(eye(n_, n_)), method_(method) {
}
/**
* Create initial state, i.e., prior density at time k=0
* In Kalman Filter notation, this are is x_{0|0} and P_{0|0}
* @param x0 estimate at time 0
* @param P0 covariance at time 0, given as a diagonal Gaussian 'model'
*/
State init(const Vector& x0, const SharedDiagonal& P0);
/// version of init with a full covariance matrix
State init(const Vector& x0, const Matrix& P0);
/// print
void print(const std::string& s = "") const;
/** Return step index k, starts at 0, incremented at each predict. */
static Key step(const State& p) {
return p->firstFrontalKey();
}
/**
* Predict the state P(x_{t+1}|Z^t)
* In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
* After the call, that is the density that can be queried.
* Details and parameters:
* In a linear Kalman Filter, the motion model is f(x_{t}) = F*x_{t} + B*u_{t} + w
* where F is the state transition model/matrix, B is the control input model,
* and w is zero-mean, Gaussian white noise with covariance Q.
*/
State predict(const State& p, const Matrix& F, const Matrix& B,
const Vector& u, const SharedDiagonal& modelQ);
/*
* Version of predict with full covariance
* Q is normally derived as G*w*G^T where w models uncertainty of some
* physical property, such as velocity or acceleration, and G is derived from physics.
* This version allows more realistic models than a diagonal covariance matrix.
*/
State predictQ(const State& p, const Matrix& F, const Matrix& B,
const Vector& u, const Matrix& Q);
/**
* Predict the state P(x_{t+1}|Z^t)
* In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
* After the call, that is the density that can be queried.
* Details and parameters:
* This version of predict takes GaussianFactor motion model [A0 A1 b]
* with an optional noise model.
*/
State predict2(const State& p, const Matrix& A0, const Matrix& A1,
const Vector& b, const SharedDiagonal& model);
/**
* Update Kalman filter with a measurement
* For the Kalman Filter, the measurement function, h(x_{t}) = z_{t}
* will be of the form h(x_{t}) = H*x_{t} + v
* where H is the observation model/matrix, and v is zero-mean,
* Gaussian white noise with covariance R.
* Currently, R is restricted to diagonal Gaussians (model parameter)
*/
State update(const State& p, const Matrix& H, const Vector& z,
const SharedDiagonal& model);
/*
* Version of update with full covariance
* Q is normally derived as G*w*G^T where w models uncertainty of some
* physical property, such as velocity or acceleration, and G is derived from physics.
* This version allows more realistic models than a diagonal covariance matrix.
*/
State updateQ(const State& p, const Matrix& H, const Vector& z,
const Matrix& Q);
enum Factorization {
QR, CHOLESKY
};
/**
* The Kalman filter state is simply a GaussianDensity
*/
typedef GaussianDensity::shared_ptr State;
private:
const size_t n_; /** dimensionality of state */
const Matrix I_; /** identity matrix of size n*n */
const Factorization method_; /** algorithm */
GaussianFactorGraph::Eliminate factorization() const;
public:
// Constructor
KalmanFilter(size_t n, Factorization method =
KALMANFILTER_DEFAULT_FACTORIZATION) :
n_(n), I_(eye(n_, n_)), method_(method) {
}
/**
* Create initial state, i.e., prior density at time k=0
* In Kalman Filter notation, these are x_{0|0} and P_{0|0}
* @param x0 estimate at time 0
* @param P0 covariance at time 0, given as a diagonal Gaussian 'model'
*/
State init(const Vector& x0, const SharedDiagonal& P0);
/// version of init with a full covariance matrix
State init(const Vector& x0, const Matrix& P0);
/// print
void print(const std::string& s = "") const;
/** Return step index k, starts at 0, incremented at each predict. */
static Key step(const State& p) {
return p->firstFrontalKey();
}
/**
* Predict the state P(x_{t+1}|Z^t)
* In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
* Details and parameters:
* In a linear Kalman Filter, the motion model is f(x_{t}) = F*x_{t} + B*u_{t} + w
* where F is the state transition model/matrix, B is the control input model,
* and w is zero-mean, Gaussian white noise with covariance Q.
*/
State predict(const State& p, const Matrix& F, const Matrix& B,
const Vector& u, const SharedDiagonal& modelQ);
/*
* Version of predict with full covariance
* Q is normally derived as G*w*G^T where w models uncertainty of some
* physical property, such as velocity or acceleration, and G is derived from physics.
* This version allows more realistic models than a diagonal covariance matrix.
*/
State predictQ(const State& p, const Matrix& F, const Matrix& B,
const Vector& u, const Matrix& Q);
/**
* Predict the state P(x_{t+1}|Z^t)
* In Kalman Filter notation, this is x_{t+1|t} and P_{t+1|t}
* After the call, that is the density that can be queried.
* Details and parameters:
* This version of predict takes GaussianFactor motion model [A0 A1 b]
* with an optional noise model.
*/
State predict2(const State& p, const Matrix& A0, const Matrix& A1,
const Vector& b, const SharedDiagonal& model);
/**
* Update Kalman filter with a measurement
* For the Kalman Filter, the measurement function, h(x_{t}) = z_{t}
* will be of the form h(x_{t}) = H*x_{t} + v
* where H is the observation model/matrix, and v is zero-mean,
* Gaussian white noise with covariance R.
* In this version, R is restricted to diagonal Gaussians (model parameter)
*/
State update(const State& p, const Matrix& H, const Vector& z,
const SharedDiagonal& model);
/*
* Version of update with full covariance
* Q is normally derived as G*w*G^T where w models uncertainty of some
* physical property, such as velocity or acceleration, and G is derived from physics.
* This version allows more realistic models than a diagonal covariance matrix.
*/
State updateQ(const State& p, const Matrix& H, const Vector& z,
const Matrix& Q);
};
} // \namespace gtsam
/* ************************************************************************* */
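For reference, a minimal end-to-end usage sketch of the API declared above. The values, the measurement model and the noiseModel::Isotropic helper are hypothetical illustrations, not part of this commit, and exact headers may differ slightly between GTSAM versions:

#include <gtsam/linear/KalmanFilter.h>
#include <gtsam/linear/NoiseModel.h>
#include <iostream>

using namespace gtsam;

int main() {
  // 2-D state, QR factorization variant
  KalmanFilter kf(2, KalmanFilter::QR);

  // x_{0|0} and P_{0|0}
  KalmanFilter::State p =
      kf.init(Vector::Zero(2), noiseModel::Isotropic::Sigma(2, 0.1));

  // Motion model x_{t+1} = F*x_t + B*u_t + w, w ~ N(0, Q)
  Matrix F = Matrix::Identity(2, 2);
  Matrix B = Matrix::Identity(2, 2);
  Vector u = Vector::Ones(2);
  SharedDiagonal modelQ = noiseModel::Isotropic::Sigma(2, 0.2);

  // Direct measurement of the state: z_t = H*x_t + v, v ~ N(0, R)
  Matrix H = Matrix::Identity(2, 2);
  SharedDiagonal modelR = noiseModel::Isotropic::Sigma(2, 0.5);

  for (int t = 0; t < 3; ++t) {
    p = kf.predict(p, F, B, u, modelQ);  // x_{t+1|t}
    Vector z = Vector::Ones(2);          // stand-in measurement
    p = kf.update(p, H, z, modelR);      // x_{t+1|t+1}
  }

  p->print("final state");
  std::cout << "step index: " << KalmanFilter::step(p) << std::endl;
  return 0;
}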