Merge remote-tracking branch 'upstream/develop' into gal (update upstream)
commit 5702ec791b
@ -0,0 +1,46 @@
#!/usr/bin/env bash
# This script deletes all but the most recent release from the gtsam-develop project on PyPI
# and can be used if the project size exceeds the PyPI limit of 10 GB. The user must have
# owner or maintainer privileges on the project.

set -euo pipefail

usage() {
  cat <<EOF
Usage: $(basename "$0") <pypi_username>

Deletes all but the most recent release from the gtsam-develop project on PyPI.
You must supply the PyPI user name that has owner or maintainer privileges on
the project. THIS OPERATION IS PERMANENT.

Examples
  $ $(basename "$0") yambati3
  $ $(basename "$0")            # will prompt for user name
EOF
}

if [[ $# -ge 1 ]]; then
  PYPI_USER="$1"
else
  read -rp "Enter your PyPI user name: " PYPI_USER
  [[ -z "$PYPI_USER" ]] && { echo "No user name supplied."; usage; exit 1; }
fi

echo "-----------------------------------------------------------------------"
echo "WARNING: This WILL permanently delete all but the most recent release"
echo "         of 'gtsam-develop' on PyPI for user '$PYPI_USER'."
echo "         This cannot be undone."
echo "-----------------------------------------------------------------------"
read -rp "Proceed? [y/N]: " REPLY
REPLY=${REPLY,,}  # to lowercase
[[ "$REPLY" != "y" && "$REPLY" != "yes" ]] && { echo "Aborted."; exit 0; }

echo "Running pypi_cleanup for user '$PYPI_USER'..."
python3 -m pypi_cleanup.__init__ \
  -p gtsam-develop \
  --leave-most-recent-only \
  --do-it \
  -u "$PYPI_USER"

echo "Done."
@ -0,0 +1,77 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file IEKF_Rot3Example.cpp
 * @brief Left-Invariant EKF on SO(3) with state-dependent pitch/roll control
 * and a single magnetometer update.
 * @date April 25, 2025
 * @authors Scott Baker, Matt Kielo, Frank Dellaert
 */

#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/navigation/LieGroupEKF.h>

#include <iostream>

using namespace std;
using namespace gtsam;

// --- 1) Closed-loop dynamics f(X): xi = -k*[φx, φy, 0], H = ∂xi/∂φ * Dφ ---
static constexpr double k = 0.5;
Vector3 dynamicsSO3(const Rot3& X, OptionalJacobian<3, 3> H = {}) {
  // φ = Logmap(R), Dφ = ∂φ/∂δR
  Matrix3 D_phi;
  Vector3 phi = Rot3::Logmap(X, D_phi);
  // zero out yaw
  phi[2] = 0.0;
  D_phi.row(2).setZero();

  if (H) *H = -k * D_phi;  // ∂(-kφ)/∂δR
  return -k * phi;         // xi ∈ 𝔰𝔬(3)
}

// --- 2) Magnetometer model: z = R⁻¹ m, H = -[z]_× ---
static const Vector3 m_world(0, 0, -1);
Vector3 h_mag(const Rot3& X, OptionalJacobian<3, 3> H = {}) {
  Vector3 z = X.inverse().rotate(m_world);
  if (H) *H = -skewSymmetric(z);
  return z;
}

int main() {
  // Initial estimate and covariance (note: R0 is not the identity)
  const Rot3 R0 = Rot3::RzRyRx(0.1, -0.2, 0.3);
  const Matrix3 P0 = Matrix3::Identity() * 0.1;
  LieGroupEKF<Rot3> ekf(R0, P0);

  // Timestep, process noise, measurement noise
  double dt = 0.1;
  Matrix3 Q = Matrix3::Identity() * 0.01;
  Matrix3 Rm = Matrix3::Identity() * 0.05;

  cout << "=== Init ===\nR:\n"
       << ekf.state().matrix() << "\nP:\n"
       << ekf.covariance() << "\n\n";

  // Predict using state-dependent f
  ekf.predict(dynamicsSO3, dt, Q);
  cout << "--- After predict ---\nR:\n" << ekf.state().matrix() << "\n\n";

  // Magnetometer measurement = body-frame reading of m_world
  Vector3 z = h_mag(R0);
  ekf.update(h_mag, z, Rm);
  cout << "--- After update ---\nR:\n" << ekf.state().matrix() << "\n";

  return 0;
}
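A note on the controller in dynamicsSO3 above: it commands a tangent velocity that exponentially damps the pitch and roll components of the attitude error while leaving the yaw component unactuated; the Jacobian handed to the EKF is simply the chain rule through the Logmap,

\[
\xi = -k \begin{bmatrix} \phi_x \\ \phi_y \\ 0 \end{bmatrix}, \qquad
\frac{\partial \xi}{\partial \delta R} = -k\, D_\phi, \qquad
D_\phi = \frac{\partial\, \mathrm{Log}(R)}{\partial \delta R},
\]

with the third row of \(D_\phi\) zeroed to stay consistent with the zeroed yaw component of \(\phi\).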
@ -0,0 +1,95 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file IEKF_NavstateExample.cpp
 * @brief InvariantEKF on NavState (SE_2(3)) with IMU (predict) and GPS (update)
 * @date April 25, 2025
 * @authors Scott Baker, Matt Kielo, Frank Dellaert
 */

#include <gtsam/base/Matrix.h>
#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/navigation/InvariantEKF.h>
#include <gtsam/navigation/NavState.h>

#include <iostream>

using namespace std;
using namespace gtsam;

/**
 * @brief Left-invariant dynamics for NavState.
 * @param imu 6×1 vector [a; ω]: linear acceleration and angular velocity.
 * @return 9×1 tangent: [ω; 0₃; a].
 */
Vector9 dynamics(const Vector6& imu) {
  auto a = imu.head<3>();
  auto w = imu.tail<3>();
  Vector9 xi;
  xi << w, Vector3::Zero(), a;
  return xi;
}

/**
 * @brief GPS measurement model: returns position and its Jacobian.
 * @param X Current state.
 * @param H Optional 3×9 Jacobian w.r.t. X.
 * @return 3×1 position vector.
 */
Vector3 h_gps(const NavState& X, OptionalJacobian<3, 9> H = {}) {
  return X.position(H);
}

int main() {
  // Initial state & covariances
  NavState X0;  // R=I, v=0, t=0
  Matrix9 P0 = Matrix9::Identity() * 0.1;
  InvariantEKF<NavState> ekf(X0, P0);

  // Noise & timestep
  double dt = 1.0;
  Matrix9 Q = Matrix9::Identity() * 0.01;
  Matrix3 R = Matrix3::Identity() * 0.5;

  // Two IMU samples [a; ω]
  Vector6 imu1;
  imu1 << 0.1, 0, 0, 0, 0.2, 0;
  Vector6 imu2;
  imu2 << 0, 0.3, 0, 0.4, 0, 0;

  // Two GPS fixes
  Vector3 z1;
  z1 << 0.3, 0, 0;
  Vector3 z2;
  z2 << 0.6, 0, 0;

  cout << "=== Init ===\nX: " << ekf.state() << "\nP: " << ekf.covariance()
       << "\n\n";

  // --- first predict/update ---
  ekf.predict(dynamics(imu1), dt, Q);
  cout << "--- After predict 1 ---\nX: " << ekf.state()
       << "\nP: " << ekf.covariance() << "\n\n";
  ekf.update(h_gps, z1, R);
  cout << "--- After update 1 ---\nX: " << ekf.state()
       << "\nP: " << ekf.covariance() << "\n\n";

  // --- second predict/update ---
  ekf.predict(dynamics(imu2), dt, Q);
  cout << "--- After predict 2 ---\nX: " << ekf.state()
       << "\nP: " << ekf.covariance() << "\n\n";
  ekf.update(h_gps, z2, R);
  cout << "--- After update 2 ---\nX: " << ekf.state()
       << "\nP: " << ekf.covariance() << "\n";

  return 0;
}
@ -0,0 +1,119 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file IEKF_SE2Example.cpp
 * @brief A left-invariant Extended Kalman Filter example on SE(2), using Lie
 * group odometry as the prediction stage.
 *
 * This example uses the templated InvariantEKF class to estimate the state of
 * an object from odometry and GPS measurements. The prediction stage of the
 * InvariantEKF uses a Lie group element U to propagate the state in discrete
 * time. In most cases, U = exp(u^ * dt) if u is a control vector that is
 * constant over the interval dt. However, if u is not constant over dt, other
 * approaches are needed to find the value of U. This approach simply takes a
 * Lie group element U, which can be obtained in various ways.
 *
 * The results were compared to a left-invariant EKF on SE(2) using identical
 * measurements and noise, from the source of the InEKF plugin
 * (https://inekf.readthedocs.io/en/latest/), based on the paper "An
 * Introduction to the Invariant Extended Kalman Filter" by Easton R. Potokar,
 * Randal W. Beard, and Joshua G. Mangelson.
 *
 * @date Apr 25, 2025
 * @authors Scott Baker, Matt Kielo, Frank Dellaert
 */
#include <gtsam/geometry/Pose2.h>
#include <gtsam/navigation/InvariantEKF.h>

#include <iostream>

using namespace std;
using namespace gtsam;

// Create a 2D GPS measurement function that returns the predicted measurement h
// and Jacobian H. The predicted measurement h is the translation of the state
// X.
Vector2 h_gps(const Pose2& X, OptionalJacobian<2, 3> H = {}) {
  return X.translation(H);
}

// Instantiate an InvariantEKF that uses the Pose2 Lie group as the state and
// Vector2 as the measurement type.
int main() {
  // Initialize the filter's state and covariance.
  Pose2 X0(0.0, 0.0, 0.0);
  Matrix3 P0 = Matrix3::Identity() * 0.1;

  // Create the filter with the initial state and covariance.
  InvariantEKF<Pose2> ekf(X0, P0);

  // Define the process covariance and measurement covariance matrices Q and R.
  Matrix3 Q = (Vector3(0.05, 0.05, 0.001)).asDiagonal();
  Matrix2 R = I_2x2 * 0.01;

  // Define odometry movements.
  // U1: Move 1 unit in X, 1 unit in Y, 0.5 radians in theta.
  // U2: Move 1 unit in X, 1 unit in Y, 0 radians in theta.
  Pose2 U1(1.0, 1.0, 0.5), U2(1.0, 1.0, 0.0);

  // Create GPS measurements.
  // z1: Measure robot at (1, 0)
  // z2: Measure robot at (1, 1)
  Vector2 z1, z2;
  z1 << 1.0, 0.0;
  z2 << 1.0, 1.0;

  // Define a transformation matrix to convert the covariance into (theta, x, y)
  // form. The paper and data mentioned above use a (theta, x, y) notation,
  // whereas GTSAM uses (x, y, theta). The transformation matrix is used to
  // directly compare results of the covariance matrix.
  Matrix3 TransformP;
  TransformP << 0, 0, 1, 1, 0, 0, 0, 1, 0;

  // Propagating/updating the filter
  // Initial state and covariance
  cout << "\nInitialization:\n";
  cout << "X0: " << ekf.state() << endl;
  cout << "P0: " << TransformP * ekf.covariance() * TransformP.transpose()
       << endl;

  // First prediction stage
  ekf.predict(U1, Q);
  cout << "\nFirst Prediction:\n";
  cout << "X: " << ekf.state() << endl;
  cout << "P: " << TransformP * ekf.covariance() * TransformP.transpose()
       << endl;

  // First update stage
  ekf.update(h_gps, z1, R);
  cout << "\nFirst Update:\n";
  cout << "X: " << ekf.state() << endl;
  cout << "P: " << TransformP * ekf.covariance() * TransformP.transpose()
       << endl;

  // Second prediction stage
  ekf.predict(U2, Q);
  cout << "\nSecond Prediction:\n";
  cout << "X: " << ekf.state() << endl;
  cout << "P: " << TransformP * ekf.covariance() * TransformP.transpose()
       << endl;

  // Second update stage
  ekf.update(h_gps, z2, R);
  cout << "\nSecond Update:\n";
  cout << "X: " << ekf.state() << endl;
  cout << "P: " << TransformP * ekf.covariance() * TransformP.transpose()
       << endl;

  return 0;
}
@ -27,8 +27,6 @@
#include <gtsam/slam/dataset.h>
#include <time.h>

-#include <boost/algorithm/string/classification.hpp>
-#include <boost/algorithm/string/split.hpp>
#include <fstream>
#include <string>
#include <vector>
@ -169,7 +169,7 @@ namespace internal {
/// To use this for your gtsam type, define:
/// template<> struct traits<Class> : public internal::LieGroupTraits<Class> {};
/// Assumes existence of: identity, dimension, localCoordinates, retract,
-/// and additionally Logmap, Expmap, compose, between, and inverse
+/// and additionally Logmap, Expmap, AdjointMap, compose, between, and inverse
template<class Class>
struct LieGroupTraits : public GetDimensionImpl<Class, Class::dimension> {
  using structure_category = lie_group_tag;
@ -183,18 +183,20 @@ struct LieGroupTraits: public GetDimensionImpl<Class, Class::dimension> {
  /// @name Manifold
  /// @{
  using ManifoldType = Class;
+ // Note: Class::dimension can be an int or Eigen::Dynamic.
+ // GetDimensionImpl handles resolving this to a static value or providing GetDimension(obj).
  inline constexpr static auto dimension = Class::dimension;
  using TangentVector = Eigen::Matrix<double, dimension, 1>;
  using ChartJacobian = OptionalJacobian<dimension, dimension>;

  static TangentVector Local(const Class& origin, const Class& other,
-                            ChartJacobian Horigin = {}, ChartJacobian Hother = {}) {
-   return origin.localCoordinates(other, Horigin, Hother);
+                            ChartJacobian H1 = {}, ChartJacobian H2 = {}) {
+   return origin.localCoordinates(other, H1, H2);
  }

  static Class Retract(const Class& origin, const TangentVector& v,
-                      ChartJacobian Horigin = {}, ChartJacobian Hv = {}) {
-   return origin.retract(v, Horigin, Hv);
+                      ChartJacobian H = {}, ChartJacobian Hv = {}) {
+   return origin.retract(v, H, Hv);
  }
  /// @}
@ -222,6 +224,13 @@ struct LieGroupTraits: public GetDimensionImpl<Class, Class::dimension> {
                      ChartJacobian H = {}) {
    return m.inverse(H);
  }

+ static Eigen::Matrix<double, dimension, dimension> AdjointMap(const Class& m) {
+   // This assumes that the Class itself provides a member function `AdjointMap()`.
+   // For dynamically-sized types (dimension == Eigen::Dynamic),
+   // m.AdjointMap() must return a gtsam::Matrix of the correct runtime dimensions.
+   return m.AdjointMap();
+ }
  /// @}
};
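A minimal sketch of what the traits-level AdjointMap added above enables, assuming GTSAM's Pose3 (the function name below is illustrative, not from the diff). The Adjoint relates a tangent vector across a change of base point via T * Exp(xi) * T^{-1} = Exp(Ad_T * xi):

#include <gtsam/geometry/Pose3.h>
using namespace gtsam;

void adjointTraitsSketch() {
  const Pose3 T(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3));
  const Vector6 xi = (Vector6() << 0.01, 0.02, 0.03, 0.1, 0.2, 0.3).finished();
  // Generic code can now query the Adjoint through traits<>:
  const Matrix6 Ad = traits<Pose3>::AdjointMap(T);
  // Defining identity of the Adjoint: T * Exp(xi) == Exp(Ad * xi) * T.
  const Pose3 lhs = T * Pose3::Expmap(xi);
  const Pose3 rhs = Pose3::Expmap(Ad * xi) * T;
  // lhs and rhs agree up to numerical precision.
}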
@ -277,6 +286,10 @@ inline Class expmap_default(const Class& t, const Vector& d) {
template<typename T>
class IsLieGroup: public IsGroup<T>, public IsManifold<T> {
public:
+ // Concept marker: allows checking IsLieGroup<T>::value in templates
+ static constexpr bool value =
+     std::is_base_of<lie_group_tag, typename traits<T>::structure_category>::value;

  typedef typename traits<T>::structure_category structure_category_tag;
  typedef typename traits<T>::ManifoldType ManifoldType;
  typedef typename traits<T>::TangentVector TangentVector;
@ -284,7 +297,7 @@ public:

  GTSAM_CONCEPT_USAGE(IsLieGroup) {
    static_assert(
-       (std::is_base_of<lie_group_tag, structure_category_tag>::value),
+       value,
        "This type's trait does not assert it is a Lie group (or derived)");

    // group operations with Jacobians
@ -138,10 +138,13 @@ public:
  static const int dim = traits<T>::dimension;
  typedef typename traits<T>::ManifoldType ManifoldType;
  typedef typename traits<T>::TangentVector TangentVector;
+ // Concept marker: allows checking IsManifold<T>::value in templates
+ static constexpr bool value =
+     std::is_base_of<manifold_tag, structure_category_tag>::value;

  GTSAM_CONCEPT_USAGE(IsManifold) {
    static_assert(
-       (std::is_base_of<manifold_tag, structure_category_tag>::value),
+       value,
        "This type's structure_category trait does not assert it as a manifold (or derived)");
    static_assert(TangentVector::SizeAtCompileTime == dim);
@ -82,12 +82,12 @@ struct VectorSpaceImpl {
    return -v;
  }

- static LieAlgebra Hat(const TangentVector& v) {
-   return v;
- }
+ static LieAlgebra Hat(const TangentVector& v) { return v; }

- static TangentVector Vee(const LieAlgebra& X) {
-   return X;
- }
+ static TangentVector Vee(const LieAlgebra& X) { return X; }

+ static Jacobian AdjointMap(const Class& /*m*/) {
+   return Jacobian::Identity();
+ }
  /// @}
};
@ -141,8 +141,7 @@ struct VectorSpaceImpl<Class,Eigen::Dynamic> {

  static Class Expmap(const TangentVector& v, ChartJacobian Hv = {}) {
    Class result(v);
-   if (Hv)
-     *Hv = Eye(v);
+   if (Hv) *Hv = Eye(v);
    return result;
  }
@ -165,6 +164,7 @@ struct VectorSpaceImpl<Class,Eigen::Dynamic> {
    return -v;
  }

+ static Eigen::MatrixXd AdjointMap(const Class& m) { return Eye(m); }
  /// @}
};
@ -273,8 +273,8 @@ struct ScalarTraits : VectorSpaceImpl<Scalar, 1> {
    if (H) (*H)[0] = 1.0;
    return v[0];
  }
+ // AdjointMap for ScalarTraits is inherited from VectorSpaceImpl<Scalar, 1>
  /// @}
-
};

} // namespace internal
@ -352,6 +352,9 @@ struct traits<Eigen::Matrix<double, M, N, Options, MaxRows, MaxCols> > :
    if (H) *H = Jacobian::Identity();
    return m + Eigen::Map<const Fixed>(v.data());
  }

+ // AdjointMap for fixed-size Eigen matrices is inherited from
+ // internal::VectorSpaceImpl< Eigen::Matrix<double, M, N, ...> , M*N >
  /// @}
};
@ -425,7 +428,7 @@ struct DynamicTraits {
  static TangentVector Logmap(const Dynamic& m, ChartJacobian H = {}) {
    if (H) *H = Eye(m);
    TangentVector result(GetDimension(m));
-   Eigen::Map<Dynamic>(result.data(), m.cols(), m.rows()) = m;
+   Eigen::Map<Dynamic>(result.data(), m.rows(), m.cols()) = m;
    return result;
  }
@ -461,6 +464,7 @@ struct DynamicTraits {
    return X;
  }

+ static Jacobian AdjointMap(const Dynamic& m) { return Eye(m); }
  /// @}
};
@ -506,4 +510,3 @@ private:
};

} // namespace gtsam
-
@ -0,0 +1,61 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file DiscreteMarginals.cpp
 * @brief A class for computing marginals in a DiscreteFactorGraph
 * @author Abhijit Kundu
 * @author Richard Roberts
 * @author Varun Agrawal
 * @author Frank Dellaert
 * @date June 4, 2012
 */

#include <gtsam/discrete/DiscreteMarginals.h>

namespace gtsam {

/* ************************************************************************* */
DiscreteMarginals::DiscreteMarginals(const DiscreteFactorGraph& graph) {
  bayesTree_ = graph.eliminateMultifrontal();
}

/* ************************************************************************* */
DiscreteFactor::shared_ptr DiscreteMarginals::operator()(Key variable) const {
  // Compute marginal
  DiscreteFactor::shared_ptr marginalFactor =
      bayesTree_->marginalFactor(variable, &EliminateDiscrete);
  return marginalFactor;
}

/* ************************************************************************* */
Vector DiscreteMarginals::marginalProbabilities(const DiscreteKey& key) const {
  // Compute marginal
  DiscreteFactor::shared_ptr marginalFactor = this->operator()(key.first);

  // Create result
  Vector vResult(key.second);
  for (size_t state = 0; state < key.second; ++state) {
    DiscreteValues values;
    values[key.first] = state;
    vResult(state) = (*marginalFactor)(values);
  }
  return vResult;
}

/* ************************************************************************* */
void DiscreteMarginals::print(const std::string& s,
                              const KeyFormatter formatter) const {
  std::cout << (s.empty() ? "Discrete Marginals of:" : s + " ") << std::endl;
  bayesTree_->print("", formatter);
}

} /* namespace gtsam */
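A minimal usage sketch of the class above (keys, cardinalities, and table strings are illustrative only, not from the diff): build a small DiscreteFactorGraph, construct DiscreteMarginals once, then query per-variable marginals repeatedly from the cached Bayes tree.

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>
#include <iostream>

using namespace gtsam;

int main() {
  DiscreteKey A(0, 2), B(1, 2);  // two binary variables
  DiscreteFactorGraph graph;
  graph.add(A, "9 1");           // prior on A
  graph.add(A & B, "9 1 1 9");   // pairwise potential between A and B

  DiscreteMarginals marginals(graph);              // eliminates the graph once
  Vector pA = marginals.marginalProbabilities(A);  // [P(A=0), P(A=1)]
  std::cout << pA.transpose() << std::endl;
  return 0;
}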
@ -14,6 +14,7 @@
 * @brief A class for computing marginals in a DiscreteFactorGraph
 * @author Abhijit Kundu
 * @author Richard Roberts
+ * @author Varun Agrawal
 * @author Frank Dellaert
 * @date June 4, 2012
 */
@ -30,7 +31,7 @@ namespace gtsam {
 * A class for computing marginals of variables in a DiscreteFactorGraph
 * @ingroup discrete
 */
-class DiscreteMarginals {
+class GTSAM_EXPORT DiscreteMarginals {
 protected:
  DiscreteBayesTree::shared_ptr bayesTree_;
@ -41,35 +42,20 @@ class DiscreteMarginals {
   * @param graph The factor graph defining the full joint
   * distribution on all variables.
   */
- DiscreteMarginals(const DiscreteFactorGraph& graph) {
-   bayesTree_ = graph.eliminateMultifrontal();
- }
+ DiscreteMarginals(const DiscreteFactorGraph& graph);

  /** Compute the marginal of a single variable */
- DiscreteFactor::shared_ptr operator()(Key variable) const {
-   // Compute marginal
-   DiscreteFactor::shared_ptr marginalFactor =
-       bayesTree_->marginalFactor(variable, &EliminateDiscrete);
-   return marginalFactor;
- }
+ DiscreteFactor::shared_ptr operator()(Key variable) const;

  /** Compute the marginal of a single variable
   * @param key DiscreteKey of the Variable
   * @return Vector of marginal probabilities
   */
- Vector marginalProbabilities(const DiscreteKey& key) const {
-   // Compute marginal
-   DiscreteFactor::shared_ptr marginalFactor = this->operator()(key.first);
-
-   // Create result
-   Vector vResult(key.second);
-   for (size_t state = 0; state < key.second; ++state) {
-     DiscreteValues values;
-     values[key.first] = state;
-     vResult(state) = (*marginalFactor)(values);
-   }
-   return vResult;
- }
+ Vector marginalProbabilities(const DiscreteKey& key) const;

+ /// Print details
+ void print(const std::string& s = "",
+            const KeyFormatter formatter = DefaultKeyFormatter) const;
};

} /* namespace gtsam */
@ -494,4 +494,18 @@ class DiscreteSearch {
  std::vector<gtsam::DiscreteSearchSolution> run(size_t K = 1) const;
};

+#include <gtsam/discrete/DiscreteMarginals.h>
+
+class DiscreteMarginals {
+  DiscreteMarginals();
+  DiscreteMarginals(const gtsam::DiscreteFactorGraph& graph);
+
+  gtsam::DiscreteFactor* operator()(gtsam::Key variable) const;
+  gtsam::Vector marginalProbabilities(const gtsam::DiscreteKey& key) const;
+
+  void print(const std::string& s = "",
+             const gtsam::KeyFormatter& keyFormatter =
+                 gtsam::DefaultKeyFormatter) const;
+};

} // namespace gtsam
@ -11,8 +11,15 @@

/**
 * @file RegularHessianFactor.h
- * @brief HessianFactor class with constant sized blocks
+ * @brief Specialized HessianFactor class for regular problems (fixed-size blocks).
+ * @details This factor is specifically designed for quadratic cost functions
+ * where all variables involved have the same, fixed dimension `D`.
+ * It stores the Hessian and gradient terms and provides optimized
+ * methods for operations like Hessian-vector products, crucial for
+ * iterative solvers like Conjugate Gradient. It ensures that all
+ * involved blocks have the expected dimension `D`.
 * @author Sungtae An
 * @author Frank Dellaert
 * @date March 2014
 */
@ -20,10 +27,32 @@

#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/RegularJacobianFactor.h>
+#include <gtsam/linear/VectorValues.h>  // For VectorValues and Scatter
#include <vector>
+#include <stdexcept>  // For std::invalid_argument

namespace gtsam {

+/**
+ * @brief A HessianFactor where all variables have the same dimension D.
+ * @details Represents a quadratic factor \f$ E(x) = 0.5 x^T G x - g^T x + 0.5 f \f$,
+ * corresponding to a Gaussian probability density \f$ P(x) \propto \exp(-E(x)) \f$.
+ * Here, G is the Hessian (or information matrix), g is the gradient vector,
+ * and f is a constant term (negative log-likelihood at x=0).
+ *
+ * This class is templated on the dimension `D` and enforces that all variables
+ * associated with this factor have this dimension during construction.
+ * It inherits from HessianFactor but provides specialized, efficient implementations
+ * for operations like `multiplyHessianAdd` using raw memory access, assuming
+ * a regular block structure. This can significantly improve performance in
+ * iterative optimization algorithms when dealing with many variables of the
+ * same type and dimension (e.g., Pose3 variables in SLAM).
+ *
+ * It can be constructed directly from Hessian blocks, from a
+ * RegularJacobianFactor, or by linearizing a NonlinearFactorGraph.
+ *
+ * @tparam D The dimension of each variable block involved in the factor.
+ */
template<size_t D>
class RegularHessianFactor : public HessianFactor {
@ -32,9 +61,14 @@ public:
  typedef Eigen::Matrix<double, D, 1> VectorD;
  typedef Eigen::Matrix<double, D, D> MatrixD;

- /** Construct an n-way factor. Gs contains the upper-triangle blocks of the
-  * quadratic term (the Hessian matrix) provided in row-order, gs the pieces
-  * of the linear vector term, and f the constant term.
+ /**
+  * @brief Construct an n-way factor from supplied components.
+  * @details Represents the energy function \f$ E(x) = 0.5 \sum_{i,j} x_i^T G_{ij} x_j - \sum_i g_i^T x_i + 0.5 f \f$.
+  * Note that the keys in `js` must correspond order-wise to the blocks in `Gs` and `gs`.
+  * @param js Vector of keys involved in the factor.
+  * @param Gs Vector of upper-triangular Hessian blocks (row-major order, e.g., G11, G12, G13, G22, G23, G33 for a ternary factor).
+  * @param gs Vector of gradient vector blocks (-J^T b).
+  * @param f Constant term \f$ 0.5*f = 0.5*\|b\|^2 \f$.
   */
  RegularHessianFactor(const KeyVector& js,
      const std::vector<Matrix>& Gs, const std::vector<Vector>& gs, double f) :
@ -62,8 +96,14 @@ public:
      HessianFactor(j1, j2, j3, G11, G12, G13, g1, G22, G23, g2, G33, g3, f) {
  }

- /** Constructor with an arbitrary number of keys and with the augmented information matrix
-  * specified as a block matrix. */
+ /**
+  * @brief Constructor with an arbitrary number of keys and the augmented information matrix
+  * specified as a block matrix.
+  * @details The augmented information matrix contains the Hessian blocks G and the
+  * gradient blocks g, arranged as \f$ [ G, -g ; -g^T, f ] \f$.
+  * @param keys Container of keys specifying the variables involved and their order.
+  * @param augmentedInformation The augmented information matrix [H -g; -g' f], laid out as a SymmetricBlockMatrix.
+  */
  template<typename KEYS>
  RegularHessianFactor(const KEYS& keys,
      const SymmetricBlockMatrix& augmentedInformation) :
@ -71,18 +111,35 @@ public:
    checkInvariants();
  }

- /// Construct from RegularJacobianFactor
+ /**
+  * @brief Construct a RegularHessianFactor from a RegularJacobianFactor.
+  * @details Computes \f$ G = J^T J \f$, \f$ g = J^T b \f$, and \f$ f = b^T b \f$.
+  * Requires that the JacobianFactor also has regular block sizes matching `D`.
+  * @param jf The RegularJacobianFactor to convert.
+  */
  RegularHessianFactor(const RegularJacobianFactor<D>& jf)
-     : HessianFactor(jf) {}
+     : HessianFactor(jf) {
+ }

- /// Construct from a GaussianFactorGraph
+ /**
+  * @brief Construct from a GaussianFactorGraph combined using a Scatter.
+  * @details This is typically used internally by optimizers. It sums the Hessian
+  * contributions from multiple factors in the graph.
+  * @param factors The graph containing factors to combine.
+  * @param scatter A Scatter structure indicating the layout of variables.
+  */
  RegularHessianFactor(const GaussianFactorGraph& factors,
                       const Scatter& scatter)
      : HessianFactor(factors, scatter) {
    checkInvariants();
  }

- /// Construct from a GaussianFactorGraph
+ /**
+  * @brief Construct from a GaussianFactorGraph.
+  * @details This is typically used internally by optimizers. It sums the Hessian
+  * contributions from multiple factors in the graph. Assumes keys are ordered 0..n-1.
+  * @param factors The graph containing factors to combine.
+  */
  RegularHessianFactor(const GaussianFactorGraph& factors)
      : HessianFactor(factors) {
    checkInvariants();
@ -90,128 +147,194 @@ public:

private:

- /// Check invariants after construction
- void checkInvariants() {
-   if (info_.cols() != 1 + (info_.nBlocks()-1) * (DenseIndex)D)
-     throw std::invalid_argument(
-         "RegularHessianFactor constructor was given non-regular factors");
+ /**
+  * @brief Check if the factor has regular block structure.
+  * @details Verifies that the dimensions of the stored augmented information matrix
+  * `info_` correspond to the expected size based on the number of keys and the
+  * fixed block dimension `D`. Throws `std::invalid_argument` if the structure
+  * is not regular. This is called internally by constructors.
+  */
+ void checkInvariants() const {
+   if (info_.cols() != 0 &&  // Allow zero-key factors (e.g. priors on anchor nodes)
+       info_.cols() != 1 + (info_.nBlocks() - 1) * static_cast<DenseIndex>(D))
+     throw std::invalid_argument(
+         "RegularHessianFactor constructor was given non-regular factors or "
+         "incorrect template dimension D");
  }

- // Use Eigen magic to access raw memory
+ // Use Eigen magic to access raw memory for efficiency in Hessian products
  typedef Eigen::Map<VectorD> DMap;
  typedef Eigen::Map<const VectorD> ConstDMap;

- // Scratch space for multiplyHessianAdd
+ // Scratch space for multiplyHessianAdd to avoid re-allocation
  // According to link below this is thread-safe.
  // http://stackoverflow.com/questions/11160964/multiple-copies-of-the-same-object-c-thread-safe
- mutable std::vector<VectorD> y_;
+ mutable std::vector<VectorD, Eigen::aligned_allocator<VectorD>> y_;

public:

- /** y += alpha * A'*A*x */
+ /**
+  * @brief Multiply the Hessian part of the factor times a VectorValues `x` and add the result to `y`.
+  * @details Computes `y += alpha * H * x`.
+  * @param alpha Scalar multiplier.
+  * @param x VectorValues containing the vector to multiply with.
+  * @param[in,out] y VectorValues to store the result (accumulates).
+  */
  void multiplyHessianAdd(double alpha, const VectorValues& x,
                          VectorValues& y) const override {
+   // Note: This implementation just calls the base class. The raw memory versions
+   // below are specifically optimized for the regular structure of this class.
+   // Consider using those directly or ensuring the base class implementation
+   // is efficient enough for your use case if calling this version.
    HessianFactor::multiplyHessianAdd(alpha, x, y);
  }

- /** y += alpha * A'*A*x */
+ /**
+  * @brief Multiply the Hessian part of the factor times a raw vector `x` and add the result to `y`. (Raw memory version)
+  * @details Computes `y += alpha * H * x`, optimized for regular block structure.
+  * Assumes `x` and `yvalues` are pre-allocated, contiguous memory blocks
+  * where variable `j` corresponds to memory chunk `x + keys_[j] * D`
+  * and `yvalues + keys_[j] * D`.
+  * @param alpha Scalar multiplier.
+  * @param x Raw pointer to the input vector data.
+  * @param[in,out] yvalues Raw pointer to the output vector data (accumulates).
+  */
  void multiplyHessianAdd(double alpha, const double* x,
                          double* yvalues) const {
-   // Create a vector of temporary y_ values, corresponding to rows i
-   y_.resize(size());
+   // Ensure scratch space is properly sized
+   const size_t n = size();
+   if (y_.size() != n) {
+     y_.resize(n);
+   }
    for (VectorD& yi : y_)
      yi.setZero();

-   // Accessing the VectorValues one by one is expensive
-   // So we will loop over columns to access x only once per column
-   // And fill the above temporary y_ values, to be added into yvalues after
-   VectorD xj(D);
-   for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
-     Key key = keys_[j];
-     const double* xj = x + key * D;
-     DenseIndex i = 0;
-     for (; i < j; ++i)
-       y_[i] += info_.aboveDiagonalBlock(i, j) * ConstDMap(xj);
-     // blocks on the diagonal are only half
-     y_[i] += info_.diagonalBlock(j) * ConstDMap(xj);
-     // for below diagonal, we take transpose block from upper triangular part
-     for (i = j + 1; i < (DenseIndex) size(); ++i)
-       y_[i] += info_.aboveDiagonalBlock(j, i).transpose() * ConstDMap(xj);
-   }
-
-   // copy to yvalues
-   for (DenseIndex i = 0; i < (DenseIndex) size(); ++i) {
-     Key key = keys_[i];
-     DMap(yvalues + key * D) += alpha * y_[i];
-   }
+   // Loop over columns (j) of the Hessian H=info_
+   // Accessing x only once per column j
+   // Fill temporary y_ vector corresponding to rows i
+   for (DenseIndex j = 0; j < static_cast<DenseIndex>(n); ++j) {
+     Key key_j = keys_[j];
+     ConstDMap xj(x + key_j * D);  // Map to the j-th block of x
+
+     DenseIndex i = 0;
+     // Off-diagonal blocks G_ij * x_j for i < j
+     for (; i < j; ++i) {
+       y_[i] += info_.aboveDiagonalBlock(i, j) * xj;
+     }
+     // Diagonal block G_jj * x_j
+     y_[i] += info_.diagonalBlock(j) * xj;
+     // Off-diagonal blocks G_ij * x_j for i > j (using transpose G_ji^T * x_j)
+     for (i = j + 1; i < static_cast<DenseIndex>(n); ++i) {
+       y_[i] += info_.aboveDiagonalBlock(j, i).transpose() * xj;
+     }
+   }
+
+   // Add accumulated results from y_ to the output yvalues
+   for (DenseIndex i = 0; i < static_cast<DenseIndex>(n); ++i) {
+     Key key_i = keys_[i];
+     DMap map_yi(yvalues + key_i * D);  // Map to the i-th block of yvalues
+     map_yi += alpha * y_[i];
+   }
  }

- /// Raw memory version, with offsets TODO document reasoning
+ /**
+  * @brief Multiply the Hessian part of the factor times a raw vector `x` and add the result to `y`.
+  * @details Computes `y += alpha * H * x`, optimized for regular block structure,
+  * but handles potentially non-contiguous or scattered memory layouts
+  * for `x` and `yvalues` as defined by the `offsets` vector.
+  * `offsets[k]` should give the starting index of variable `k` in the
+  * raw memory arrays. `offsets[k+1] - offsets[k]` should equal `D`.
+  * @param alpha Scalar multiplier.
+  * @param x Raw pointer to the input vector data.
+  * @param[in,out] yvalues Raw pointer to the output vector data (accumulates).
+  * @param offsets Vector mapping variable keys to their starting index in `x` and `yvalues`.
+  */
  void multiplyHessianAdd(double alpha, const double* x, double* yvalues,
-                         std::vector<size_t> offsets) const {
-   // Create a vector of temporary y_ values, corresponding to rows i
-   y_.resize(size());
+                         const std::vector<size_t>& offsets) const {
+   // Ensure scratch space is properly sized
+   const size_t n = size();
+   if (y_.size() != n) {
+     y_.resize(n);
+   }
    for (VectorD& yi : y_)
      yi.setZero();

-   // Accessing the VectorValues one by one is expensive
-   // So we will loop over columns to access x only once per column
-   // And fill the above temporary y_ values, to be added into yvalues after
-   for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
-     DenseIndex i = 0;
-     for (; i < j; ++i)
-       y_[i] += info_.aboveDiagonalBlock(i, j)
-           * ConstDMap(x + offsets[keys_[j]],
-                       offsets[keys_[j] + 1] - offsets[keys_[j]]);
-     // blocks on the diagonal are only half
-     y_[i] += info_.diagonalBlock(j)
-         * ConstDMap(x + offsets[keys_[j]],
-                     offsets[keys_[j] + 1] - offsets[keys_[j]]);
-     // for below diagonal, we take transpose block from upper triangular part
-     for (i = j + 1; i < (DenseIndex) size(); ++i)
-       y_[i] += info_.aboveDiagonalBlock(j, i).transpose()
-           * ConstDMap(x + offsets[keys_[j]],
-                       offsets[keys_[j] + 1] - offsets[keys_[j]]);
-   }
-
-   // copy to yvalues
-   for (DenseIndex i = 0; i < (DenseIndex) size(); ++i)
-     DMap(yvalues + offsets[keys_[i]],
-          offsets[keys_[i] + 1] - offsets[keys_[i]]) += alpha * y_[i];
+   // Loop over columns (j) of the Hessian H=info_
+   for (DenseIndex j = 0; j < static_cast<DenseIndex>(n); ++j) {
+     Key key_j = keys_[j];
+     size_t offset_j = offsets[key_j];
+     // Ensure block size matches D (redundant if checkInvariants worked, but safe)
+     size_t dim_j = offsets[key_j + 1] - offset_j;
+     if (dim_j != D) throw std::runtime_error("RegularHessianFactor::multiplyHessianAdd: Mismatched dimension in offset map.");
+     ConstDMap xj(x + offset_j);  // Map to the j-th block of x using offset
+
+     DenseIndex i = 0;
+     // Off-diagonal blocks G_ij * x_j for i < j
+     for (; i < j; ++i) {
+       y_[i] += info_.aboveDiagonalBlock(i, j) * xj;
+     }
+     // Diagonal block G_jj * x_j
+     y_[i] += info_.diagonalBlock(j) * xj;
+     // Off-diagonal blocks G_ij * x_j for i > j (using transpose G_ji^T * x_j)
+     for (i = j + 1; i < static_cast<DenseIndex>(n); ++i) {
+       y_[i] += info_.aboveDiagonalBlock(j, i).transpose() * xj;
+     }
+   }
+
+   // Add accumulated results from y_ to the output yvalues using offsets
+   for (DenseIndex i = 0; i < static_cast<DenseIndex>(n); ++i) {
+     Key key_i = keys_[i];
+     size_t offset_i = offsets[key_i];
+     size_t dim_i = offsets[key_i + 1] - offset_i;
+     if (dim_i != D) throw std::runtime_error("RegularHessianFactor::multiplyHessianAdd: Mismatched dimension in offset map.");
+     DMap map_yi(yvalues + offset_i);  // Map to the i-th block of yvalues using offset
+     map_yi += alpha * y_[i];
+   }
  }

- /** Return the diagonal of the Hessian for this factor (raw memory version) */
+ /**
+  * @brief Return the diagonal of the Hessian for this factor (Raw memory version).
+  * @details Adds the diagonal elements of the Hessian matrix \f$ H = G \f$ associated
+  * with this factor to the pre-allocated raw memory block `d`.
+  * Assumes `d` has the standard contiguous layout `d + keys_[i] * D`.
+  * @param[in,out] d Raw pointer to the memory block where Hessian diagonals should be added.
+  */
  void hessianDiagonal(double* d) const override {
-   // Loop over all variables in the factor
-   for (DenseIndex pos = 0; pos < (DenseIndex) size(); ++pos) {
+   // Loop over all variables (diagonal blocks) in the factor
+   const size_t n = size();
+   for (DenseIndex pos = 0; pos < static_cast<DenseIndex>(n); ++pos) {
      Key j = keys_[pos];
-     // Get the diagonal block, and insert its diagonal
+     // Get the diagonal block G_jj and add its diagonal elements to d
      DMap(d + D * j) += info_.diagonal(pos);
    }
  }

- /// Add gradient at zero to d TODO: is it really the goal to add ??
+ /**
+  * @brief Add the gradient vector \f$ -g \f$ (gradient at zero) to a raw memory block `d`.
+  * @details Adds the gradient vector \f$ -g \f$ (where the factor represents
+  * \f$ 0.5 x^T G x - g^T x + 0.5 f \f$) to the pre-allocated raw
+  * memory block `d`. Assumes `d` has the standard contiguous layout
+  * `d + keys_[i] * D`.
+  * @param[in,out] d Raw pointer to the memory block where the gradient should be added.
+  */
  void gradientAtZero(double* d) const override {
-   // Loop over all variables in the factor
-   for (DenseIndex pos = 0; pos < (DenseIndex) size(); ++pos) {
+   // The linear term g is stored as the last block column of info_ (negated).
+   // We add (-g) to d.
+   const size_t n = size();
+   for (DenseIndex pos = 0; pos < static_cast<DenseIndex>(n); ++pos) {
      Key j = keys_[pos];
-     // Get the diagonal block, and insert its diagonal
-     DMap(d + D * j) -= info_.aboveDiagonalBlock(pos, size());;
+     // info_.aboveDiagonalBlock(pos, n) accesses the block corresponding to g_j
+     DMap(d + D * j) += info_.aboveDiagonalBlock(pos, n);
    }
  }

  /* ************************************************************************* */

-};
-// end class RegularHessianFactor
+};  // end class RegularHessianFactor

// traits
template<size_t D> struct traits<RegularHessianFactor<D> > : public Testable<
    RegularHessianFactor<D> > {
};

-}
+} // namespace gtsam
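A minimal sketch of the contiguous raw-memory layout the D-templated multiplyHessianAdd above assumes (keys and values below are illustrative only, not from the diff): the block for variable with key k occupies the D doubles starting at x + k * D.

#include <gtsam/linear/RegularHessianFactor.h>

using namespace gtsam;

void rawHessianProductSketch() {
  // Binary factor on keys 0 and 1, block dimension D = 2.
  const Matrix G11 = Matrix2::Identity(), G12 = Matrix2::Zero(),
               G22 = Matrix2::Identity();
  const Vector g1 = Vector2(1.0, 0.0), g2 = Vector2(0.0, 1.0);
  RegularHessianFactor<2> factor(KeyVector{0, 1}, {G11, G12, G22}, {g1, g2},
                                 0.0);

  // Key k lives at offset k * D: four doubles cover keys 0 and 1.
  double x[4] = {1.0, 2.0, 3.0, 4.0};
  double y[4] = {0.0, 0.0, 0.0, 0.0};
  factor.multiplyHessianAdd(1.0, x, y);  // y += H * x
}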
@ -0,0 +1,113 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file InvariantEKF.h
 * @brief Left-Invariant Extended Kalman Filter implementation.
 *
 * This file defines the InvariantEKF class template, inheriting from LieGroupEKF,
 * which specifically implements the Left-Invariant EKF formulation. It restricts
 * prediction methods to only those based on group composition (state-independent
 * motion models), hiding the state-dependent prediction variants from LieGroupEKF.
 *
 * @date April 24, 2025
 * @authors Scott Baker, Matt Kielo, Frank Dellaert
 */

#pragma once

#include <gtsam/navigation/LieGroupEKF.h>  // Include the base class
#include <gtsam/base/Lie.h>                // For traits (needed for AdjointMap, Expmap)

namespace gtsam {

/**
 * @class InvariantEKF
 * @brief Left-Invariant Extended Kalman Filter on a Lie group G.
 *
 * @tparam G Lie group type (must satisfy LieGroup concept).
 *
 * This filter inherits from LieGroupEKF but restricts the prediction interface
 * to only the left-invariant prediction methods:
 * 1. Prediction via group composition: `predict(const G& U, const Covariance& Q)`
 * 2. Prediction via tangent control vector: `predict(const TangentVector& u, double dt, const Covariance& Q)`
 *
 * The state-dependent prediction methods from LieGroupEKF are hidden.
 * The update step remains the same as in ManifoldEKF/LieGroupEKF.
 * For details on how static and dynamic dimensions are handled, please refer to
 * the `ManifoldEKF` class documentation.
 */
template <typename G>
class InvariantEKF : public LieGroupEKF<G> {
 public:
  using Base = LieGroupEKF<G>;                         ///< Base class type
  using TangentVector = typename Base::TangentVector;  ///< Tangent vector type
  /// Jacobian for group-specific operations like AdjointMap. Eigen::Matrix<double, Dim, Dim>.
  using Jacobian = typename Base::Jacobian;
  /// Covariance matrix type. Eigen::Matrix<double, Dim, Dim>.
  using Covariance = typename Base::Covariance;

  /**
   * Constructor: forwards to LieGroupEKF constructor.
   * @param X0 Initial state on Lie group G.
   * @param P0 Initial covariance in the tangent space at X0.
   */
  InvariantEKF(const G& X0, const Covariance& P0) : Base(X0, P0) {}

  // We hide state-dependent predict methods from LieGroupEKF by only providing the
  // invariant predict methods below.

  /**
   * Predict step via group composition (Left-Invariant):
   *   X_{k+1} = X_k * U
   *   P_{k+1} = Ad_{U^{-1}} P_k Ad_{U^{-1}}^T + Q
   * where Ad_{U^{-1}} is the Adjoint map of U^{-1}.
   *
   * @param U Lie group element representing the motion increment.
   * @param Q Process noise covariance.
   */
  void predict(const G& U, const Covariance& Q) {
    this->X_ = traits<G>::Compose(this->X_, U);
    const G U_inv = traits<G>::Inverse(U);
    const Jacobian A = traits<G>::AdjointMap(U_inv);
    // P_ is Covariance. A is Jacobian. Q is Covariance.
    // All are Eigen::Matrix<double,Dim,Dim>.
    this->P_ = A * this->P_ * A.transpose() + Q;
  }

  /**
   * Predict step via tangent control vector:
   *   U = Expmap(u * dt)
   * Then calls predict(U, Q).
   *
   * @param u Tangent space control vector.
   * @param dt Time interval.
   * @param Q Process noise covariance matrix.
   */
  void predict(const TangentVector& u, double dt, const Covariance& Q) {
    G U;
    if constexpr (std::is_same_v<G, Matrix>) {
      // Specialize to Matrix case as its Expmap is not defined.
      const Matrix& X = static_cast<const Matrix&>(this->X_);
      U.resize(X.rows(), X.cols());
      Eigen::Map<Vector>(static_cast<Matrix&>(U).data(), U.size()) = u * dt;
    } else {
      U = traits<G>::Expmap(u * dt);
    }
    predict(U, Q);  // Call the group composition predict
  }

};  // InvariantEKF

} // namespace gtsam
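For reference, the Adjoint in the covariance propagation of predict(U, Q) above follows directly from the left-invariant error definition \(X = \hat X \exp(\varepsilon^\wedge)\):

\[
X_{k+1} = \hat X_k \exp(\varepsilon_k^\wedge)\, U
        = \hat X_k U \left(U^{-1} \exp(\varepsilon_k^\wedge)\, U\right)
        = \hat X_{k+1} \exp\!\left((\mathrm{Ad}_{U^{-1}}\, \varepsilon_k)^\wedge\right),
\]

so the error map \(\varepsilon_{k+1} = \mathrm{Ad}_{U^{-1}}\,\varepsilon_k\) is exact (no linearization is needed in the group term), giving \(P_{k+1} = \mathrm{Ad}_{U^{-1}} P_k\, \mathrm{Ad}_{U^{-1}}^T + Q\).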
@ -0,0 +1,185 @@
/* ----------------------------------------------------------------------------
 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
 * See LICENSE for the license information
 * -------------------------------------------------------------------------- */

/**
 * @file LieGroupEKF.h
 * @brief Extended Kalman Filter derived class for Lie groups G.
 *
 * This file defines the LieGroupEKF class template, inheriting from ManifoldEKF,
 * for performing EKF steps specifically on states residing in a Lie group.
 * It provides predict methods with state-dependent dynamics functions.
 * Please use the InvariantEKF class for prediction via group composition.
 *
 * @date April 24, 2025
 * @authors Scott Baker, Matt Kielo, Frank Dellaert
 */

#pragma once

#include <gtsam/navigation/ManifoldEKF.h>  // Include the base class
#include <gtsam/base/Lie.h>                // Include for Lie group traits and operations

#include <Eigen/Dense>
#include <type_traits>
#include <functional>  // For std::function

namespace gtsam {

/**
 * @class LieGroupEKF
 * @brief Extended Kalman Filter on a Lie group G, derived from ManifoldEKF
 *
 * @tparam G Lie group type (must satisfy LieGroup concept).
 *
 * This filter specializes ManifoldEKF for Lie groups, offering predict methods
 * with state-dependent dynamics functions.
 * Use the InvariantEKF class for prediction via group composition.
 * For details on how static and dynamic dimensions are handled, please refer to
 * the `ManifoldEKF` class documentation.
 */
template <typename G>
class LieGroupEKF : public ManifoldEKF<G> {
 public:
  using Base = ManifoldEKF<G>;           ///< Base class type
  static constexpr int Dim = Base::Dim;  ///< Compile-time dimension of G.
  using TangentVector = typename Base::TangentVector;  ///< Tangent vector type for G.
  /// Jacobian for group operations (Adjoint, Expmap derivatives, F).
  using Jacobian = typename Base::Jacobian;      // Eigen::Matrix<double, Dim, Dim>
  using Covariance = typename Base::Covariance;  // Eigen::Matrix<double, Dim, Dim>

  /**
   * Constructor: initialize with state and covariance.
   * @param X0 Initial state on Lie group G.
   * @param P0 Initial covariance in the tangent space at X0.
   */
  LieGroupEKF(const G& X0, const Covariance& P0) : Base(X0, P0) {
    static_assert(IsLieGroup<G>::value, "Template parameter G must be a GTSAM Lie Group");
  }

  /**
   * SFINAE check for correctly typed state-dependent dynamics function.
   * Signature: TangentVector f(const G& X, OptionalJacobian<Dim, Dim> Df)
   * Df = d(xi)/d(local(X))
   */
  template <typename Dynamics>
  using enable_if_dynamics = std::enable_if_t<
      !std::is_convertible_v<Dynamics, TangentVector> &&
      std::is_invocable_r_v<TangentVector, Dynamics, const G&,
                            OptionalJacobian<Dim, Dim>&>>;

  /**
   * Predict mean and Jacobian A with state-dependent dynamics:
   *   xi = f(X_k, Df)           (Compute tangent vector dynamics and Jacobian Df)
   *   U = Expmap(xi * dt, Dexp) (Compute motion increment U and Expmap Jacobian Dexp)
   *   X_{k+1} = X_k * U         (Predict next state)
   *   A = Ad_{U^{-1}} + Dexp * Df * dt  (Compute full state transition Jacobian)
   *
   * @tparam Dynamics Functor signature: TangentVector f(const G&, OptionalJacobian<Dim,Dim>&)
   * @param f Dynamics functor returning tangent vector xi and its Jacobian Df w.r.t. local(X).
   * @param dt Time step.
   * @param A OptionalJacobian to store the computed state transition Jacobian A.
   * @return Predicted state X_{k+1}.
   */
  template <typename Dynamics, typename = enable_if_dynamics<Dynamics>>
  G predictMean(Dynamics&& f, double dt, OptionalJacobian<Dim, Dim> A = {}) const {
    Jacobian Df, Dexp;  // Eigen::Matrix<double, Dim, Dim>

    if constexpr (std::is_same_v<G, Matrix>) {
      // Specialize to Matrix case as its Expmap is not defined.
      TangentVector xi = f(this->X_, A ? &Df : nullptr);
      const Matrix nextX = traits<Matrix>::Retract(this->X_, xi * dt, A ? &Dexp : nullptr);  // just addition
      if (A) {
        const Matrix I_n = Matrix::Identity(this->n_, this->n_);
        *A = I_n + Dexp * Df * dt;  // AdjointMap is always identity for Matrix
      }
      return nextX;
    } else {
      TangentVector xi = f(this->X_, A ? &Df : nullptr);  // xi and Df = d(xi)/d(localX)
      G U = traits<G>::Expmap(xi * dt, A ? &Dexp : nullptr);
      if (A) {
        // State transition Jacobian for left-invariant EKF:
        *A = traits<G>::Inverse(U).AdjointMap() + Dexp * Df * dt;
      }
      return this->X_.compose(U);
    }
  }

  /**
   * Predict step with state-dependent dynamics:
   * Uses predictMean to compute X_{k+1} and A, then updates covariance.
   *   X_{k+1}, A = predictMean(f, dt)
   *   P_{k+1} = A P_k A^T + Q
   *
   * @tparam Dynamics Functor signature: TangentVector f(const G&, OptionalJacobian<Dim,Dim>&)
   * @param f Dynamics functor.
   * @param dt Time step.
   * @param Q Process noise covariance.
   */
  template <typename Dynamics, typename = enable_if_dynamics<Dynamics>>
  void predict(Dynamics&& f, double dt, const Covariance& Q) {
    Jacobian A;
    if constexpr (Dim == Eigen::Dynamic) {
      A.resize(this->n_, this->n_);
    }
    this->X_ = predictMean(std::forward<Dynamics>(f), dt, A);
    this->P_ = A * this->P_ * A.transpose() + Q;
  }

  /**
   * SFINAE check for state- and control-dependent dynamics function.
   * Signature: TangentVector f(const G& X, const Control& u, OptionalJacobian<Dim, Dim> Df)
   */
  template <typename Control, typename Dynamics>
  using enable_if_full_dynamics = std::enable_if_t<
      std::is_invocable_r_v<TangentVector, Dynamics, const G&, const Control&,
                            OptionalJacobian<Dim, Dim>&>>;

  /**
   * Predict mean and Jacobian A with state and control input dynamics:
   * Wraps the dynamics function and calls the state-only predictMean.
   *   xi = f(X_k, u, Df)
   *
   * @tparam Control Control input type.
   * @tparam Dynamics Functor signature: TangentVector f(const G&, const Control&, OptionalJacobian<Dim,Dim>&)
   * @param f Dynamics functor.
   * @param u Control input.
   * @param dt Time step.
   * @param A Optional pointer to store the computed state transition Jacobian A.
   * @return Predicted state X_{k+1}.
   */
  template <typename Control, typename Dynamics, typename = enable_if_full_dynamics<Control, Dynamics>>
  G predictMean(Dynamics&& f, const Control& u, double dt, OptionalJacobian<Dim, Dim> A = {}) const {
    return predictMean([&](const G& X, OptionalJacobian<Dim, Dim> Df) { return f(X, u, Df); }, dt, A);
  }

  /**
   * Predict step with state and control input dynamics:
   * Wraps the dynamics function and calls the state-only predict.
   *   xi = f(X_k, u, Df)
   *
   * @tparam Control Control input type.
   * @tparam Dynamics Functor signature: TangentVector f(const G&, const Control&, OptionalJacobian<Dim,Dim>&)
   * @param f Dynamics functor.
   * @param u Control input.
   * @param dt Time step.
   * @param Q Process noise covariance.
   */
  template <typename Control, typename Dynamics, typename = enable_if_full_dynamics<Control, Dynamics>>
  void predict(Dynamics&& f, const Control& u, double dt, const Covariance& Q) {
    return predict([&](const G& X, OptionalJacobian<Dim, Dim> Df) { return f(X, u, Df); }, dt, Q);
  }

};  // LieGroupEKF

} // namespace gtsam
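A sketch of where the state transition Jacobian \(A = \mathrm{Ad}_{U^{-1}} + D_{\exp} D_f\, dt\) in predictMean comes from, to first order (with \(D_{\exp}\) the right Jacobian of Expmap at \(\hat\xi\, dt\)): define the left-invariant error by \(X = \hat X \exp(\varepsilon^\wedge)\), so \(\xi(X) \approx \hat\xi + D_f\, \varepsilon\); then

\[
X^+ = X \exp\!\bigl(\xi(X)\, dt\bigr)
    \approx \hat X \exp(\varepsilon^\wedge)\, \hat U \exp\!\bigl(D_{\exp} D_f\, dt\, \varepsilon\bigr)
    = \hat X^+ \exp\!\bigl(\mathrm{Ad}_{\hat U^{-1}} \varepsilon\bigr) \exp\!\bigl(D_{\exp} D_f\, dt\, \varepsilon\bigr)
    \approx \hat X^+ \exp\!\bigl((\mathrm{Ad}_{\hat U^{-1}} + D_{\exp} D_f\, dt)\, \varepsilon\bigr),
\]

dropping higher-order BCH terms, which matches the expression computed in the code.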
@ -0,0 +1,258 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file ManifoldEKF.h
|
||||
* @brief Extended Kalman Filter base class on a generic manifold M
|
||||
*
|
||||
* This file defines the ManifoldEKF class template for performing prediction
|
||||
* and update steps of an Extended Kalman Filter on states residing in a
|
||||
* differentiable manifold. It relies on the manifold's retract and
|
||||
* localCoordinates operations.
|
||||
*
|
||||
* @date April 24, 2025
|
||||
* @authors Scott Baker, Matt Kielo, Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/base/OptionalJacobian.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/base/Manifold.h> // Include for traits and IsManifold
|
||||
|
||||
#include <Eigen/Dense>
|
||||
#include <string>
|
||||
#include <stdexcept>
|
||||
#include <type_traits>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* @class ManifoldEKF
|
||||
* @brief Extended Kalman Filter on a generic manifold M
|
||||
*
|
||||
* @tparam M Manifold type (must satisfy Manifold concept).
|
||||
*
|
||||
* This filter maintains a state X in the manifold M and covariance P in the
|
||||
* tangent space at X.
|
||||
* Prediction requires providing the predicted next state and the state transition Jacobian F.
|
||||
* Updates apply a measurement function h and correct the state using the tangent space error.
|
||||
*
|
||||
* **Handling Static and Dynamic Dimensions:**
|
||||
* The filter supports manifolds M with either a compile-time fixed dimension or a
|
||||
* runtime dynamic dimension. This is determined by `gtsam::traits<M>::dimension`.
|
||||
* - If `dimension` is an integer (e.g., 3, 6), it's a fixed-size manifold.
|
||||
* - If `dimension` is `Eigen::Dynamic`, it's a dynamically-sized manifold. In this case,
|
||||
* `gtsam::traits<M>::GetDimension(const M&)` must be available to retrieve the
|
||||
* actual dimension at runtime.
|
||||
* The internal protected member `n_` stores this runtime dimension.
|
||||
* Covariance matrices (e.g., `P_`, method argument `Q`) and Jacobians (e.g., method argument `F`)
|
||||
* are typed using `Covariance` and `Jacobian` typedefs, which are specializations of
|
||||
* `Eigen::Matrix<double, Dim, Dim>`, where `Dim` is `traits<M>::dimension`.
|
||||
* For dynamically-sized manifolds (`Dim == Eigen::Dynamic`), these Eigen types
|
||||
* represent dynamically-sized matrices.
|
||||
*/
|
||||
template <typename M>
|
||||
class ManifoldEKF {
|
||||
public:
|
||||
/// Compile-time dimension of the manifold M.
|
||||
static constexpr int Dim = traits<M>::dimension;
|
||||
|
||||
/// Tangent vector type for the manifold M.
|
||||
using TangentVector = typename traits<M>::TangentVector;
|
||||
/// Covariance matrix type (P, Q).
|
||||
using Covariance = Eigen::Matrix<double, Dim, Dim>;
|
||||
/// State transition Jacobian type (F).
|
||||
using Jacobian = Eigen::Matrix<double, Dim, Dim>;
|
||||
|
||||
|
||||
/**
|
||||
* Constructor: initialize with state and covariance.
|
||||
* @param X0 Initial state on manifold M.
|
||||
* @param P0 Initial covariance in the tangent space at X0.
|
||||
*/
|
||||
ManifoldEKF(const M& X0, const Covariance& P0) : X_(X0) {
|
||||
static_assert(IsManifold<M>::value,
|
||||
"Template parameter M must be a GTSAM Manifold.");
|
||||
|
||||
if constexpr (Dim == Eigen::Dynamic) {
|
||||
n_ = traits<M>::GetDimension(X0);
|
||||
// Validate dimensions of initial covariance P0.
|
||||
if (P0.rows() != n_ || P0.cols() != n_) {
|
||||
throw std::invalid_argument(
|
||||
"ManifoldEKF: Initial covariance P0 dimensions (" +
|
||||
std::to_string(P0.rows()) + "x" + std::to_string(P0.cols()) +
|
||||
") do not match state's tangent space dimension (" +
|
||||
std::to_string(n_) + ").");
|
||||
}
|
||||
}
|
||||
else {
|
||||
n_ = Dim;
|
||||
}
|
||||
|
||||
P_ = P0;
|
||||
}
|
||||
|
||||
virtual ~ManifoldEKF() = default;
|
||||
|
||||
/// @return current state estimate on manifold M.
|
||||
const M& state() const { return X_; }
|
||||
|
||||
/// @return current covariance estimate.
|
||||
const Covariance& covariance() const { return P_; }
|
||||
|
||||
/// @return runtime dimension of the manifold.
|
||||
int dimension() const { return n_; }
|
||||
|
||||
/**
|
||||
* Basic predict step: Updates state and covariance given the predicted
|
||||
* next state and the state transition Jacobian F.
|
||||
* X_{k+1} = X_next
|
||||
* P_{k+1} = F P_k F^T + Q
|
||||
* where F = d(local(X_{k+1})) / d(local(X_k)) is the Jacobian of the
|
||||
* state transition in local coordinates around X_k.
|
||||
*
|
||||
* @param X_next The predicted state at time k+1 on manifold M.
|
||||
* @param F The state transition Jacobian.
|
||||
* @param Q Process noise covariance matrix.
|
||||
*/
|
||||
void predict(const M& X_next, const Jacobian& F, const Covariance& Q) {
|
||||
if constexpr (Dim == Eigen::Dynamic) {
|
||||
if (F.rows() != n_ || F.cols() != n_ || Q.rows() != n_ || Q.cols() != n_) {
|
||||
throw std::invalid_argument(
|
||||
"ManifoldEKF::predict: Dynamic F/Q dimensions must match state dimension " +
|
||||
std::to_string(n_) + ".");
|
||||
}
|
||||
}
|
||||
|
||||
X_ = X_next;
|
||||
P_ = F * P_ * F.transpose() + Q;
|
||||
}
|
||||
|
||||
/**
|
||||
* Measurement update: Corrects the state and covariance using a pre-calculated
|
||||
* predicted measurement and its Jacobian.
|
||||
*
|
||||
* @tparam Measurement Type of the measurement vector (e.g., VectorN<m>, Vector).
|
||||
* @param prediction Predicted measurement.
|
||||
* @param H Jacobian of the measurement function h w.r.t. local(X), H = dh/dlocal(X).
|
||||
* @param z Observed measurement.
|
||||
* @param R Measurement noise covariance.
|
||||
*/
|
||||
template <typename Measurement>
|
||||
void update(const Measurement& prediction,
|
||||
const Eigen::Matrix<double, traits<Measurement>::dimension, Dim>& H,
|
||||
const Measurement& z,
|
||||
const Eigen::Matrix<double, traits<Measurement>::dimension, traits<Measurement>::dimension>& R) {
|
||||
|
||||
static_assert(IsManifold<Measurement>::value,
|
||||
"Template parameter Measurement must be a GTSAM Manifold for LocalCoordinates.");
|
||||
|
||||
static constexpr int MeasDim = traits<Measurement>::dimension;
|
||||
|
||||
int m_runtime;
|
||||
if constexpr (MeasDim == Eigen::Dynamic) {
|
||||
m_runtime = traits<Measurement>::GetDimension(z);
|
||||
if (traits<Measurement>::GetDimension(prediction) != m_runtime) {
|
||||
throw std::invalid_argument(
|
||||
"ManifoldEKF::update: Dynamic measurement 'prediction' and 'z' have different dimensions.");
|
||||
}
|
||||
if (H.rows() != m_runtime || H.cols() != n_ || R.rows() != m_runtime || R.cols() != m_runtime) {
|
||||
throw std::invalid_argument(
|
||||
"ManifoldEKF::update: Jacobian H or Noise R dimensions mismatch for dynamic measurement.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
m_runtime = MeasDim;
|
||||
if constexpr (Dim == Eigen::Dynamic) {
|
||||
if (H.cols() != n_) {
|
||||
throw std::invalid_argument(
|
||||
"ManifoldEKF::update: Jacobian H columns must match state dimension " + std::to_string(n_) + ".");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Innovation: y = z - h(x_pred). In tangent space: local(h(x_pred), z)
|
||||
typename traits<Measurement>::TangentVector innovation =
|
||||
traits<Measurement>::Local(prediction, z);
|
||||
|
||||
// Innovation covariance: S = H P H^T + R
|
||||
// S will be Eigen::Matrix<double, MeasDim, MeasDim>
|
||||
Eigen::Matrix<double, MeasDim, MeasDim> S = H * P_ * H.transpose() + R;
|
||||
|
||||
// Kalman Gain: K = P H^T S^-1
|
||||
// K will be Eigen::Matrix<double, Dim, MeasDim>
|
||||
Eigen::Matrix<double, Dim, MeasDim> K = P_ * H.transpose() * S.inverse();
|
||||
|
||||
// Correction vector in tangent space of M: delta_xi = K * innovation
|
||||
const TangentVector delta_xi = K * innovation; // delta_xi is Dim x 1 (or n_ x 1 if dynamic)
|
||||
|
||||
// Update state using retract: X_new = retract(X_old, delta_xi)
|
||||
X_ = traits<M>::Retract(X_, delta_xi);
|
||||
|
||||
// Update covariance using Joseph form for numerical stability
|
||||
Jacobian I_n; // Eigen::Matrix<double, Dim, Dim>
|
||||
if constexpr (Dim == Eigen::Dynamic) {
|
||||
I_n = Jacobian::Identity(n_, n_);
|
||||
}
|
||||
else {
|
||||
I_n = Jacobian::Identity();
|
||||
}
|
||||
|
||||
// I_KH will be Eigen::Matrix<double, Dim, Dim>
|
||||
Jacobian I_KH = I_n - K * H;
|
||||
P_ = I_KH * P_ * I_KH.transpose() + K * R * K.transpose();
|
||||
}
|
||||
|
||||
/**
|
||||
* Measurement update: Corrects the state and covariance using a measurement
|
||||
* model function.
|
||||
*
|
||||
* @tparam Measurement Type of the measurement vector.
|
||||
* @tparam MeasurementFunction Functor/lambda providing measurement and its Jacobian.
|
||||
* Signature: `Measurement h(const M& x, Jac& H_jacobian)`
|
||||
* where H = d(h)/d(local(X)).
|
||||
* @param h Measurement model function.
|
||||
* @param z Observed measurement.
|
||||
* @param R Measurement noise covariance.
|
||||
*/
|
||||
template <typename Measurement, typename MeasurementFunction>
|
||||
void update(MeasurementFunction&& h, const Measurement& z,
|
||||
const Eigen::Matrix<double, traits<Measurement>::dimension, traits<Measurement>::dimension>& R) {
|
||||
static_assert(IsManifold<Measurement>::value,
|
||||
"Template parameter Measurement must be a GTSAM Manifold.");
|
||||
|
||||
static constexpr int MeasDim = traits<Measurement>::dimension;
|
||||
|
||||
int m_runtime;
|
||||
if constexpr (MeasDim == Eigen::Dynamic) {
|
||||
m_runtime = traits<Measurement>::GetDimension(z);
|
||||
}
|
||||
else {
|
||||
m_runtime = MeasDim;
|
||||
}
|
||||
|
||||
// Predict measurement and get Jacobian H = dh/dlocal(X)
|
||||
Matrix H(m_runtime, n_);
|
||||
Measurement prediction = h(X_, H);
|
||||
|
||||
// Call the other update function
|
||||
update(prediction, H, z, R);
|
||||
}
|
||||
|
||||
protected:
|
||||
M X_; ///< Manifold state estimate.
|
||||
Covariance P_; ///< Covariance (Eigen::Matrix<double, Dim, Dim>).
|
||||
int n_; ///< Runtime tangent space dimension of M.
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,187 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
*
|
||||
* See LICENSE for the license information
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testInvariantEKF_Pose2.cpp
|
||||
* @brief Unit test for the InvariantEKF using Pose2 state.
|
||||
* Based on the logic from IEKF_SE2Example.cpp
|
||||
* @date April 26, 2025
|
||||
* @authors Frank Dellaert (adapted from example by Scott Baker, Matt Kielo)
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/navigation/InvariantEKF.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
// Create a 2D GPS measurement function that returns the predicted measurement h
|
||||
// and Jacobian H. The predicted measurement h is the translation of the state X.
|
||||
// (Copied from IEKF_SE2Example.cpp)
|
||||
Vector2 h_gps(const Pose2& X, OptionalJacobian<2, 3> H = {}) {
|
||||
return X.translation(H);
|
||||
}
|
||||
|
||||
|
||||
TEST(IEKF_Pose2, PredictUpdateSequence) {
|
||||
// GIVEN: Initial state, covariance, noises, motions, measurements
|
||||
// (from IEKF_SE2Example.cpp)
|
||||
Pose2 X0(0.0, 0.0, 0.0);
|
||||
Matrix3 P0 = Matrix3::Identity() * 0.1;
|
||||
|
||||
Matrix3 Q = (Vector3(0.05, 0.05, 0.001)).asDiagonal();
|
||||
Matrix2 R = I_2x2 * 0.01;
|
||||
|
||||
Pose2 U1(1.0, 1.0, 0.5), U2(1.0, 1.0, 0.0);
|
||||
|
||||
Vector2 z1, z2;
|
||||
z1 << 1.0, 0.0;
|
||||
z2 << 1.0, 1.0;
|
||||
|
||||
// Create the filter
|
||||
InvariantEKF<Pose2> ekf(X0, P0);
|
||||
EXPECT(assert_equal(X0, ekf.state()));
|
||||
EXPECT(assert_equal(P0, ekf.covariance()));
|
||||
|
||||
// --- First Prediction ---
|
||||
ekf.predict(U1, Q);
|
||||
|
||||
// Calculate expected state and covariance
|
||||
Pose2 X1_expected = X0.compose(U1);
|
||||
Matrix3 Ad_U1_inv = U1.inverse().AdjointMap();
|
||||
Matrix3 P1_expected = Ad_U1_inv * P0 * Ad_U1_inv.transpose() + Q;
|
||||
|
||||
// Verify
|
||||
EXPECT(assert_equal(X1_expected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(P1_expected, ekf.covariance(), 1e-9));
|
||||
|
||||
|
||||
// --- First Update ---
|
||||
ekf.update(h_gps, z1, R);
|
||||
|
||||
// Calculate expected state and covariance (manual Kalman steps)
|
||||
Matrix H1; // H = dh/dlocal(X) -> 2x3
|
||||
Vector2 z_pred1 = h_gps(X1_expected, H1);
|
||||
Vector2 y1 = z_pred1 - z1; // Innovation
|
||||
Matrix S1 = H1 * P1_expected * H1.transpose() + R;
|
||||
Matrix K1 = P1_expected * H1.transpose() * S1.inverse(); // Gain (3x2)
|
||||
Vector3 delta_xi1 = -K1 * y1; // Correction (tangent space)
|
||||
Pose2 X1_updated_expected = X1_expected.retract(delta_xi1);
|
||||
Matrix3 I_KH1 = Matrix3::Identity() - K1 * H1;
|
||||
Matrix3 P1_updated_expected = I_KH1 * P1_expected; // Standard form P = (I-KH)P
|
||||
|
||||
// Verify
|
||||
EXPECT(assert_equal(X1_updated_expected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(P1_updated_expected, ekf.covariance(), 1e-9));
|
||||
|
||||
|
||||
// --- Second Prediction ---
|
||||
ekf.predict(U2, Q);
|
||||
|
||||
// Calculate expected state and covariance
|
||||
Pose2 X2_expected = X1_updated_expected.compose(U2);
|
||||
Matrix3 Ad_U2_inv = U2.inverse().AdjointMap();
|
||||
Matrix3 P2_expected = Ad_U2_inv * P1_updated_expected * Ad_U2_inv.transpose() + Q;
|
||||
|
||||
// Verify
|
||||
EXPECT(assert_equal(X2_expected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(P2_expected, ekf.covariance(), 1e-9));
|
||||
|
||||
|
||||
// --- Second Update ---
|
||||
ekf.update(h_gps, z2, R);
|
||||
|
||||
// Calculate expected state and covariance (manual Kalman steps)
|
||||
Matrix H2; // 2x3
|
||||
Vector2 z_pred2 = h_gps(X2_expected, H2);
|
||||
Vector2 y2 = z_pred2 - z2; // Innovation
|
||||
Matrix S2 = H2 * P2_expected * H2.transpose() + R;
|
||||
Matrix K2 = P2_expected * H2.transpose() * S2.inverse(); // Gain (3x2)
|
||||
Vector3 delta_xi2 = -K2 * y2; // Correction (tangent space)
|
||||
Pose2 X2_updated_expected = X2_expected.retract(delta_xi2);
|
||||
Matrix3 I_KH2 = Matrix3::Identity() - K2 * H2;
|
||||
Matrix3 P2_updated_expected = I_KH2 * P2_expected; // Standard form
|
||||
|
||||
// Verify
|
||||
EXPECT(assert_equal(X2_updated_expected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(P2_updated_expected, ekf.covariance(), 1e-9));
|
||||
|
||||
}
|
||||
|
||||
// Define simple dynamics and measurement for a 2x2 Matrix state
|
||||
namespace exampleDynamicMatrix {
|
||||
Matrix f(const Matrix& p, const Vector& vTangent, double dt) {
|
||||
return traits<Matrix>::Retract(p, vTangent * dt);
|
||||
}
|
||||
double h(const Matrix& p, OptionalJacobian<-1, -1> H = {}) {
|
||||
if (H) {
|
||||
H->resize(1, p.size());
|
||||
(*H) << 1.0, 0.0, 0.0, 1.0; // Assuming 2x2
|
||||
}
|
||||
return p(0, 0) + p(1, 1); // Trace of the matrix
|
||||
}
|
||||
} // namespace exampleDynamicMatrix
|
||||
|
||||
TEST(InvariantEKF_DynamicMatrix, PredictAndUpdate) {
|
||||
// --- Setup ---
|
||||
Matrix p0Matrix = (Matrix(2, 2) << 1.0, 2.0, 3.0, 4.0).finished();
|
||||
Matrix p0Covariance = I_4x4 * 0.01;
|
||||
Vector velocityTangent = (Vector(4) << 0.5, 0.1, -0.1, -0.5).finished();
|
||||
double dt = 0.1;
|
||||
Matrix Q = I_4x4 * 0.001;
|
||||
Matrix R = Matrix::Identity(1, 1) * 0.005;
|
||||
|
||||
InvariantEKF<Matrix> ekf(p0Matrix, p0Covariance);
|
||||
EXPECT_LONGS_EQUAL(4, ekf.state().size());
|
||||
EXPECT_LONGS_EQUAL(4, ekf.dimension());
|
||||
|
||||
// --- Predict ---
|
||||
ekf.predict(velocityTangent, dt, Q);
|
||||
|
||||
// Verification for Predict
|
||||
Matrix pPredictedExpected = exampleDynamicMatrix::f(p0Matrix, velocityTangent, dt);
|
||||
Matrix pCovariancePredictedExpected = p0Covariance + Q;
|
||||
EXPECT(assert_equal(pPredictedExpected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(pCovariancePredictedExpected, ekf.covariance(), 1e-9));
|
||||
|
||||
// --- Update ---
|
||||
// Use the state after prediction for the update step
|
||||
Matrix pStateBeforeUpdate = ekf.state();
|
||||
Matrix pCovarianceBeforeUpdate = ekf.covariance();
|
||||
|
||||
double zTrue = exampleDynamicMatrix::h(pStateBeforeUpdate);
|
||||
double zObserved = zTrue - 0.03; // Simulated measurement with some error
|
||||
|
||||
ekf.update(exampleDynamicMatrix::h, zObserved, R);
|
||||
|
||||
// Verification for Update (Manual Kalman Steps)
|
||||
Matrix H(1, 4);
|
||||
double zPredictionManual = exampleDynamicMatrix::h(pStateBeforeUpdate, H);
|
||||
double innovationY_tangent = zObserved - zPredictionManual;
|
||||
Matrix S = H * pCovarianceBeforeUpdate * H.transpose() + R;
|
||||
Matrix kalmanGainK = pCovarianceBeforeUpdate * H.transpose() * S.inverse();
|
||||
Vector deltaXiTangent = kalmanGainK * innovationY_tangent;
|
||||
Matrix pUpdatedExpected = traits<Matrix>::Retract(pStateBeforeUpdate, deltaXiTangent);
|
||||
Matrix I_KH = I_4x4 - kalmanGainK * H;
|
||||
Matrix pUpdatedCovarianceExpected = I_KH * pCovarianceBeforeUpdate * I_KH.transpose() + kalmanGainK * R * kalmanGainK.transpose();
|
||||
|
||||
EXPECT(assert_equal(pUpdatedExpected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(pUpdatedCovarianceExpected, ekf.covariance(), 1e-9));
|
||||
}
|
||||
|
||||
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
|
@ -0,0 +1,200 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
*
|
||||
* See LICENSE for the license information
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testGroupEKF.cpp
|
||||
* @brief Unit test for LieGroupEKF, as well as dynamics used in Rot3 example.
|
||||
* @date April 26, 2025
|
||||
* @authors Scott Baker, Matt Kielo, Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/Rot3.h>
|
||||
#include <gtsam/navigation/LieGroupEKF.h>
|
||||
#include <gtsam/navigation/NavState.h>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
// Duplicate the dynamics function in GEKF_Rot3Example
|
||||
namespace exampleSO3 {
|
||||
static constexpr double k = 0.5;
|
||||
Vector3 dynamics(const Rot3& X, OptionalJacobian<3, 3> H = {}) {
|
||||
// φ = Logmap(R), Dφ = ∂φ/∂δR
|
||||
Matrix3 D_phi;
|
||||
Vector3 phi = Rot3::Logmap(X, D_phi);
|
||||
// zero out yaw
|
||||
phi[2] = 0.0;
|
||||
D_phi.row(2).setZero();
|
||||
|
||||
if (H) *H = -k * D_phi; // ∂(–kφ)/∂δR
|
||||
return -k * phi; // xi ∈ 𝔰𝔬(3)
|
||||
}
|
||||
} // namespace exampleSO3
|
||||
|
||||
TEST(GroupEKF, DynamicsJacobian) {
|
||||
// Construct a nontrivial state and IMU input
|
||||
Rot3 R = Rot3::RzRyRx(0.1, -0.2, 0.3);
|
||||
|
||||
// Analytic Jacobian
|
||||
Matrix3 actualH;
|
||||
exampleSO3::dynamics(R, actualH);
|
||||
|
||||
// Numeric Jacobian w.r.t. the state X
|
||||
auto f = [&](const Rot3& X_) { return exampleSO3::dynamics(X_); };
|
||||
Matrix3 expectedH = numericalDerivative11<Vector3, Rot3>(f, R);
|
||||
|
||||
// Compare
|
||||
EXPECT(assert_equal(expectedH, actualH));
|
||||
}
|
||||
|
||||
TEST(GroupEKF, PredictNumericState) {
|
||||
// GIVEN
|
||||
Rot3 R0 = Rot3::RzRyRx(0.2, -0.1, 0.3);
|
||||
Matrix3 P0 = Matrix3::Identity() * 0.2;
|
||||
double dt = 0.1;
|
||||
|
||||
// Analytic Jacobian
|
||||
Matrix3 actualH;
|
||||
LieGroupEKF<Rot3> ekf0(R0, P0);
|
||||
ekf0.predictMean(exampleSO3::dynamics, dt, actualH);
|
||||
|
||||
// wrap predict into a state->state functor (mapping on SO(3))
|
||||
auto g = [&](const Rot3& R) -> Rot3 {
|
||||
LieGroupEKF<Rot3> ekf(R, P0);
|
||||
return ekf.predictMean(exampleSO3::dynamics, dt);
|
||||
};
|
||||
|
||||
// numeric Jacobian of g at R0
|
||||
Matrix3 expectedH = numericalDerivative11<Rot3, Rot3>(g, R0);
|
||||
|
||||
EXPECT(assert_equal(expectedH, actualH));
|
||||
}
|
||||
|
||||
TEST(GroupEKF, StateAndControl) {
|
||||
auto f = [](const Rot3& X, const Vector2& dummy_u,
|
||||
OptionalJacobian<3, 3> H = {}) {
|
||||
return exampleSO3::dynamics(X, H);
|
||||
};
|
||||
|
||||
// GIVEN
|
||||
Rot3 R0 = Rot3::RzRyRx(0.2, -0.1, 0.3);
|
||||
Matrix3 P0 = Matrix3::Identity() * 0.2;
|
||||
Vector2 dummy_u(1, 2);
|
||||
double dt = 0.1;
|
||||
|
||||
|
||||
// Analytic Jacobian
|
||||
Matrix3 actualH;
|
||||
LieGroupEKF<Rot3> ekf0(R0, P0);
|
||||
ekf0.predictMean(f, dummy_u, dt, actualH);
|
||||
|
||||
// wrap predict into a state->state functor (mapping on SO(3))
|
||||
auto g = [&](const Rot3& R) -> Rot3 {
|
||||
LieGroupEKF<Rot3> ekf(R, P0);
|
||||
return ekf.predictMean(f, dummy_u, dt);  // Jacobian output not needed here
|
||||
};
|
||||
|
||||
// numeric Jacobian of g at R0
|
||||
Matrix3 expectedH = numericalDerivative11<Rot3, Rot3>(g, R0);
|
||||
|
||||
EXPECT(assert_equal(expectedH, actualH));
|
||||
}
|
||||
|
||||
// Namespace for dynamic Matrix LieGroupEKF test
|
||||
namespace exampleLieGroupDynamicMatrix {
|
||||
// Constant tangent vector for dynamics (same as "velocityTangent" in IEKF test)
|
||||
const Vector kFixedVelocityTangent = (Vector(4) << 0.5, 0.1, -0.1, -0.5).finished();
|
||||
|
||||
// Dynamics function: xi = f(X, H_X)
|
||||
// Returns a constant tangent vector, so Df_DX = 0.
|
||||
// H_X is D(xi)/D(X_local), where X_local is the tangent space perturbation of X.
|
||||
Vector f(const Matrix& X, OptionalJacobian<Eigen::Dynamic, Eigen::Dynamic> H_X = {}) {
|
||||
if (H_X) {
|
||||
size_t state_dim = X.size();
|
||||
size_t tangent_dim = kFixedVelocityTangent.size();
|
||||
// Ensure Jacobian dimensions are consistent even if state or tangent is 0-dim
|
||||
H_X->setZero(tangent_dim, state_dim);
|
||||
}
|
||||
return kFixedVelocityTangent;
|
||||
}
|
||||
|
||||
// Measurement function h(X, H)
|
||||
double h(const Matrix& p, OptionalJacobian<-1, -1> H = {}) {
|
||||
// Specialized for a 2x2 matrix!
|
||||
if (p.rows() != 2 || p.cols() != 2) {
|
||||
throw std::invalid_argument("Matrix must be 2x2.");
|
||||
}
|
||||
if (H) {
|
||||
H->resize(1, p.size());
|
||||
*H << 1.0, 0.0, 0.0, 1.0; // d(trace)/d[p00, p10, p01, p11] (column-major vec ordering)
|
||||
}
|
||||
return p(0, 0) + p(1, 1); // Trace of the matrix
|
||||
}
|
||||
} // namespace exampleLieGroupDynamicMatrix
|
||||
|
||||
TEST(LieGroupEKF_DynamicMatrix, PredictAndUpdate) {
|
||||
// --- Setup ---
|
||||
Matrix p0Matrix = (Matrix(2, 2) << 1.0, 2.0, 3.0, 4.0).finished();
|
||||
Matrix p0Covariance = I_4x4 * 0.01;
|
||||
double dt = 0.1;
|
||||
Matrix process_noise_Q = I_4x4 * 0.001;
|
||||
Matrix measurement_noise_R = Matrix::Identity(1, 1) * 0.005;
|
||||
|
||||
LieGroupEKF<Matrix> ekf(p0Matrix, p0Covariance);
|
||||
EXPECT_LONGS_EQUAL(4, ekf.state().size());
|
||||
EXPECT_LONGS_EQUAL(4, ekf.dimension());
|
||||
|
||||
// --- Predict ---
|
||||
// ekf.predict takes f(X, H_X), dt, process_noise_Q
|
||||
ekf.predict(exampleLieGroupDynamicMatrix::f, dt, process_noise_Q);
|
||||
|
||||
// Verification for Predict
|
||||
// For f, Df_DXk = 0 (Jacobian of xi w.r.t X_local is Zero).
|
||||
// State transition Jacobian A = Ad_Uinv + Dexp * Df_DXk * dt.
|
||||
// For Matrix (VectorSpace): Ad_Uinv = I, Dexp = I.
|
||||
// So, A = I + I * 0 * dt = I.
|
||||
// Covariance update: P_next = A * P_current * A.transpose() + Q = I * P_current * I + Q = P_current + Q.
|
||||
Matrix pPredictedExpected = traits<Matrix>::Retract(p0Matrix, exampleLieGroupDynamicMatrix::kFixedVelocityTangent * dt);
|
||||
Matrix pCovariancePredictedExpected = p0Covariance + process_noise_Q;
|
||||
|
||||
EXPECT(assert_equal(pPredictedExpected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(pCovariancePredictedExpected, ekf.covariance(), 1e-9));
|
||||
|
||||
// --- Update ---
|
||||
Matrix pStateBeforeUpdate = ekf.state();
|
||||
Matrix pCovarianceBeforeUpdate = ekf.covariance();
|
||||
|
||||
double zTrue = exampleLieGroupDynamicMatrix::h(pStateBeforeUpdate);
|
||||
double zObserved = zTrue - 0.03; // Simulated measurement with some error
|
||||
|
||||
ekf.update(exampleLieGroupDynamicMatrix::h, zObserved, measurement_noise_R);
|
||||
|
||||
// Verification for Update (Manual Kalman Steps)
|
||||
Matrix H_update(1, 4); // Measurement Jacobian: 1x4 for 2x2 matrix, trace measurement
|
||||
double zPredictionManual = exampleLieGroupDynamicMatrix::h(pStateBeforeUpdate, H_update);
|
||||
// Innovation: y_tangent = traits<Measurement>::Local(prediction, observation)
|
||||
// For double (scalar), Local(A,B) is B-A.
|
||||
double innovationY_tangent = zObserved - zPredictionManual;
|
||||
Matrix S_innovation_cov = H_update * pCovarianceBeforeUpdate * H_update.transpose() + measurement_noise_R;
|
||||
Matrix K_gain = pCovarianceBeforeUpdate * H_update.transpose() * S_innovation_cov.inverse();
|
||||
Vector deltaXiTangent = K_gain * innovationY_tangent; // Tangent space correction for Matrix state
|
||||
Matrix pUpdatedExpected = traits<Matrix>::Retract(pStateBeforeUpdate, deltaXiTangent);
|
||||
Matrix I_KH = I_4x4 - K_gain * H_update; // I_4x4 because state dimension is 4
|
||||
Matrix pUpdatedCovarianceExpected = I_KH * pCovarianceBeforeUpdate * I_KH.transpose() + K_gain * measurement_noise_R * K_gain.transpose();
|
||||
|
||||
EXPECT(assert_equal(pUpdatedExpected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(pUpdatedCovarianceExpected, ekf.covariance(), 1e-9));
|
||||
}
|
||||
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
|
@ -0,0 +1,235 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
*
|
||||
* See LICENSE for the license information
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testManifoldEKF.cpp
|
||||
* @brief Unit test for the ManifoldEKF base class using Unit3.
|
||||
* @date April 26, 2025
|
||||
* @authors Scott Baker, Matt Kielo, Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/geometry/Unit3.h>
|
||||
#include <gtsam/navigation/ManifoldEKF.h>
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
#include <iostream>
|
||||
|
||||
using namespace gtsam;
|
||||
|
||||
// Define simple dynamics for Unit3: constant velocity in the tangent space
|
||||
namespace exampleUnit3 {
|
||||
|
||||
// Predicts the next state given current state, tangent velocity, and dt
|
||||
Unit3 f(const Unit3& p, const Vector2& v, double dt) {
|
||||
return p.retract(v * dt);
|
||||
}
|
||||
|
||||
// Define a measurement model: measure the z-component of the Unit3 direction
|
||||
// H is the Jacobian dh/d(local(p))
|
||||
double measureZ(const Unit3& p, OptionalJacobian<1, 2> H) {
|
||||
if (H) {
|
||||
// H = d(p.point3().z()) / d(local(p))
|
||||
// Calculate numerically for simplicity in test
|
||||
auto h = [](const Unit3& p_) { return p_.point3().z(); };
|
||||
*H = numericalDerivative11<double, Unit3, 2>(h, p);
|
||||
}
|
||||
return p.point3().z();
|
||||
}
|
||||
|
||||
} // namespace exampleUnit3
|
||||
|
||||
// Test fixture for ManifoldEKF with Unit3
|
||||
struct Unit3EKFTest {
|
||||
Unit3 p0;
|
||||
Matrix2 P0;
|
||||
Vector2 velocity;
|
||||
double dt;
|
||||
Matrix2 Q; // Process noise
|
||||
Matrix1 R; // Measurement noise
|
||||
|
||||
Unit3EKFTest() :
|
||||
p0(Unit3(Point3(1, 0, 0))), // Start pointing along X-axis
|
||||
P0(I_2x2 * 0.01),
|
||||
velocity((Vector2() << 0.0, M_PI / 4.0).finished()), // Rotate towards +Z axis
|
||||
dt(0.1),
|
||||
Q(I_2x2 * 0.001),
|
||||
R(Matrix1::Identity() * 0.01) {
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
TEST(ManifoldEKF_Unit3, Predict) {
|
||||
Unit3EKFTest data;
|
||||
|
||||
// Initialize the EKF
|
||||
ManifoldEKF<Unit3> ekf(data.p0, data.P0);
|
||||
|
||||
// --- Prepare inputs for ManifoldEKF::predict ---
|
||||
// 1. Compute expected next state
|
||||
Unit3 p_next_expected = exampleUnit3::f(data.p0, data.velocity, data.dt);
|
||||
|
||||
// 2. Compute state transition Jacobian F = d(local(p_next)) / d(local(p))
|
||||
// We can compute this numerically using the f function.
|
||||
// GTSAM's numericalDerivative handles derivatives *between* manifolds.
|
||||
auto predict_wrapper = [&](const Unit3& p) -> Unit3 {
|
||||
return exampleUnit3::f(p, data.velocity, data.dt);
|
||||
};
|
||||
Matrix2 F = numericalDerivative11<Unit3, Unit3>(predict_wrapper, data.p0);
|
||||
|
||||
// --- Perform EKF prediction ---
|
||||
ekf.predict(p_next_expected, F, data.Q);
|
||||
|
||||
// --- Verification ---
|
||||
// Check state
|
||||
EXPECT(assert_equal(p_next_expected, ekf.state(), 1e-8));
|
||||
|
||||
// Check covariance
|
||||
Matrix2 P_expected = F * data.P0 * F.transpose() + data.Q;
|
||||
EXPECT(assert_equal(P_expected, ekf.covariance(), 1e-8));
|
||||
|
||||
// Check F manually for a simple case (e.g., zero velocity should give Identity)
|
||||
Vector2 zero_velocity = Vector2::Zero();
|
||||
auto predict_wrapper_zero = [&](const Unit3& p) -> Unit3 {
|
||||
return exampleUnit3::f(p, zero_velocity, data.dt);
|
||||
};
|
||||
Matrix2 F_zero = numericalDerivative11<Unit3, Unit3>(predict_wrapper_zero, data.p0);
|
||||
EXPECT(assert_equal<Matrix2>(I_2x2, F_zero, 1e-8));
|
||||
|
||||
}
|
||||
|
||||
TEST(ManifoldEKF_Unit3, Update) {
|
||||
Unit3EKFTest data;
|
||||
|
||||
// Use a slightly different starting point and covariance for variety
|
||||
Unit3 p_start = Unit3(Point3(0, 1, 0)).retract((Vector2() << 0.1, 0).finished()); // Perturb pointing along Y
|
||||
Matrix2 P_start = I_2x2 * 0.05;
|
||||
ManifoldEKF<Unit3> ekf(p_start, P_start);
|
||||
|
||||
// Simulate a measurement (e.g., true value + noise)
|
||||
double z_true = exampleUnit3::measureZ(p_start, {});
|
||||
double z_observed = z_true + 0.02; // Add some noise
|
||||
|
||||
// --- Perform EKF update ---
|
||||
ekf.update(exampleUnit3::measureZ, z_observed, data.R);
|
||||
|
||||
// --- Verification (Manual Kalman Update Steps) ---
|
||||
// 1. Predict measurement and get Jacobian H
|
||||
Matrix12 H; // Note: Jacobian is 1x2 for Unit3
|
||||
double z_pred = exampleUnit3::measureZ(p_start, H);
|
||||
|
||||
// 2. Innovation and Covariance
|
||||
double y = z_pred - z_observed; // Innovation (scalar measurement, so plain subtraction)
|
||||
Matrix1 S = H * P_start * H.transpose() + data.R; // 1x1 matrix
|
||||
|
||||
// 3. Kalman Gain K
|
||||
Matrix K = P_start * H.transpose() * S.inverse(); // 2x1 matrix
|
||||
|
||||
// 4. State Correction (in tangent space)
|
||||
Vector2 delta_xi = -K * y; // 2x1 vector
|
||||
|
||||
// 5. Expected Updated State and Covariance
|
||||
Unit3 p_updated_expected = p_start.retract(delta_xi);
|
||||
Matrix2 I_KH = I_2x2 - K * H;
|
||||
Matrix2 P_updated_expected = I_KH * P_start;
|
||||
|
||||
// --- Compare EKF result with manual calculation ---
|
||||
EXPECT(assert_equal(p_updated_expected, ekf.state(), 1e-8));
|
||||
EXPECT(assert_equal(P_updated_expected, ekf.covariance(), 1e-8));
|
||||
}
|
||||
|
||||
// Define simple dynamics and measurement for a 2x2 Matrix state
|
||||
namespace exampleDynamicMatrix {
|
||||
|
||||
// Predicts the next state given current state (Matrix), tangent "velocity" (Vector), and dt.
|
||||
Matrix f(const Matrix& p, const Vector& vTangent, double dt) {
|
||||
return traits<Matrix>::Retract(p, vTangent * dt); // vector-space retract: plain addition in tangent coordinates
|
||||
}
|
||||
|
||||
// Define a measurement model: measure the trace of the Matrix (assumed 2x2 here)
|
||||
double h(const Matrix& p, OptionalJacobian<-1, -1> H = {}) {
|
||||
// Specialized for a 2x2 matrix!
|
||||
if (p.rows() != 2 || p.cols() != 2) {
|
||||
throw std::invalid_argument("Matrix must be 2x2.");
|
||||
}
|
||||
if (H) {
|
||||
H->resize(1, p.size());
|
||||
*H << 1.0, 0.0, 0.0, 1.0; // d(trace)/d[p00, p10, p01, p11] (column-major vec ordering)
|
||||
}
|
||||
return p(0, 0) + p(1, 1); // Trace of the matrix
|
||||
}
|
||||
|
||||
} // namespace exampleDynamicMatrix
|
||||
|
||||
TEST(ManifoldEKF_DynamicMatrix, CombinedPredictAndUpdate) {
|
||||
Matrix pInitial = (Matrix(2, 2) << 1.0, 2.0, 3.0, 4.0).finished();
|
||||
Matrix pInitialCovariance = I_4x4 * 0.01; // Covariance for 2x2 matrix (4x4)
|
||||
Vector vTangent = (Vector(4) << 0.5, 0.1, -0.1, -0.5).finished(); // [dp00, dp10, dp01, dp11]/sec
|
||||
double deltaTime = 0.1;
|
||||
Matrix processNoiseCovariance = I_4x4 * 0.001; // Process noise covariance (4x4)
|
||||
Matrix measurementNoiseCovariance = Matrix::Identity(1, 1) * 0.005; // Measurement noise covariance (1x1)
|
||||
|
||||
ManifoldEKF<Matrix> ekf(pInitial, pInitialCovariance);
|
||||
// For a 2x2 Matrix, tangent space dimension is 2*2=4.
|
||||
EXPECT_LONGS_EQUAL(4, ekf.state().size());
|
||||
EXPECT_LONGS_EQUAL(pInitial.rows() * pInitial.cols(), ekf.state().size());
|
||||
|
||||
// Predict Step
|
||||
Matrix pPredictedMean = exampleDynamicMatrix::f(pInitial, vTangent, deltaTime);
|
||||
|
||||
// For this linear prediction model (pNext = pCurrent + V*dt in tangent space),
|
||||
// Derivative w.r.t deltaXi is Identity.
|
||||
Matrix fJacobian = I_4x4;
|
||||
|
||||
ekf.predict(pPredictedMean, fJacobian, processNoiseCovariance);
|
||||
|
||||
EXPECT(assert_equal(pPredictedMean, ekf.state(), 1e-9));
|
||||
Matrix pPredictedCovarianceExpected = fJacobian * pInitialCovariance * fJacobian.transpose() + processNoiseCovariance;
|
||||
EXPECT(assert_equal(pPredictedCovarianceExpected, ekf.covariance(), 1e-9));
|
||||
|
||||
// Update Step
|
||||
Matrix pCurrentForUpdate = ekf.state();
|
||||
Matrix pCurrentCovarianceForUpdate = ekf.covariance();
|
||||
|
||||
// True trace of pCurrentForUpdate (which is pPredictedMean)
|
||||
double zTrue = exampleDynamicMatrix::h(pCurrentForUpdate);
|
||||
EXPECT_DOUBLES_EQUAL(5.0, zTrue, 1e-9);
|
||||
double zObserved = zTrue - 0.03;
|
||||
|
||||
ekf.update(exampleDynamicMatrix::h, zObserved, measurementNoiseCovariance);
|
||||
|
||||
// Manual Kalman Update Steps for Verification
|
||||
Matrix hJacobian(1, 4); // Measurement Jacobian H (1x4 for 2x2 matrix, trace measurement)
|
||||
double zPredictionManual = exampleDynamicMatrix::h(pCurrentForUpdate, hJacobian);
|
||||
Matrix hJacobianExpected = (Matrix(1, 4) << 1.0, 0.0, 0.0, 1.0).finished();
|
||||
EXPECT(assert_equal(hJacobianExpected, hJacobian, 1e-9));
|
||||
|
||||
// Innovation: y = zObserved - zPredictionManual (since measurement is double)
|
||||
double yInnovation = zObserved - zPredictionManual;
|
||||
Matrix innovationCovariance = hJacobian * pCurrentCovarianceForUpdate * hJacobian.transpose() + measurementNoiseCovariance;
|
||||
|
||||
Matrix kalmanGain = pCurrentCovarianceForUpdate * hJacobian.transpose() * innovationCovariance.inverse(); // K is 4x1
|
||||
|
||||
// State Correction (in tangent space of Matrix)
|
||||
Vector deltaXiTangent = kalmanGain * yInnovation; // deltaXi is 4x1 Vector
|
||||
|
||||
Matrix pUpdatedManualExpected = traits<Matrix>::Retract(pCurrentForUpdate, deltaXiTangent);
|
||||
Matrix pUpdatedCovarianceManualExpected = (I_4x4 - kalmanGain * hJacobian) * pCurrentCovarianceForUpdate;
|
||||
|
||||
EXPECT(assert_equal(pUpdatedManualExpected, ekf.state(), 1e-9));
|
||||
EXPECT(assert_equal(pUpdatedCovarianceManualExpected, ekf.covariance(), 1e-9));
|
||||
}
|
||||
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
|
@ -30,7 +30,7 @@
|
|||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/slam/dataset.h> // For writeG2o
|
||||
#include <gtsam_unstable/nonlinear/IncrementalFixedLagSmoother.h>
|
||||
#include <gtsam/nonlinear/IncrementalFixedLagSmoother.h>
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
# Smart Factors
|
||||
|
||||
Smart factors in GTSAM provide an efficient way to handle constraints involving landmarks (like 3D points) in Structure from Motion (SfM) or SLAM problems without explicitly including the landmark variables in the optimized state. Instead, the landmark is implicitly represented and marginalized out, leading to factors that directly constrain only the camera-related variables (poses and/or calibrations). This often results in a smaller state space and can significantly speed up optimization, especially when using iterative linear solvers.
|
||||
|
||||
The core idea is based on the **Schur complement**. If we consider a factor graph involving cameras $C_i$ and a single landmark $p$, the Hessian matrix of the linearized system has a block structure:
|
||||
|
||||
```math
|
||||
H = \begin{bmatrix} H_{pp} & H_{pc} \\ H_{cp} & H_{cc} \end{bmatrix}
|
||||
```
|
||||
|
||||
where $H_{pp}$ relates to the landmark, $H_{cc}$ relates to the cameras, and $H_{pc}$ ($H_{cp}$) are the off-diagonal blocks. Marginalizing out the landmark variable $\delta_p$ corresponds to solving the system using the Schur complement $S = H_{cc} - H_{cp} H_{pp}^{-1} H_{pc}$. Smart factors effectively represent or compute this Schur complement system directly.
|
||||
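To make this concrete, here is a minimal self-contained Eigen sketch (plain Eigen, not a GTSAM API) that marginalizes a 3-dof landmark out of a toy SPD Hessian with two 6-dof cameras:

```cpp
// Minimal Eigen sketch: marginalize a 3-dof landmark out of a toy Hessian
// with two 6-dof cameras via the Schur complement S = Hcc - Hcp Hpp^-1 Hpc.
#include <Eigen/Dense>
#include <iostream>

int main() {
  const int p = 3, c = 12;  // landmark dim, stacked camera dims (2 x 6)
  Eigen::MatrixXd H = Eigen::MatrixXd::Random(p + c, p + c);
  H = H * H.transpose() + Eigen::MatrixXd::Identity(p + c, p + c);  // make SPD

  const Eigen::MatrixXd Hpp = H.topLeftCorner(p, p);
  const Eigen::MatrixXd Hpc = H.topRightCorner(p, c);
  const Eigen::MatrixXd Hcc = H.bottomRightCorner(c, c);

  // Effective Hessian on the cameras alone (Hcp = Hpc^T by symmetry).
  const Eigen::MatrixXd S = Hcc - Hpc.transpose() * Hpp.ldlt().solve(Hpc);
  std::cout << "Schur complement: " << S.rows() << "x" << S.cols() << "\n";
}
```

Smart factors never optimize $\delta_p$ explicitly; they hand the solver (a representation of) $S$ directly.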
|
||||
Key Reference:
|
||||
{cite}`10.1109/ICRA.2014.6907483`.
|
||||
|
||||
## Mathematical Details
|
||||
|
||||
### 1. Triangulation
|
||||
|
||||
The core internal operation of a smart factor is triangulation. Given a set of cameras $C_i$ (which include pose and calibration) and corresponding 2D measurements $z_i$, the factor finds the most likely 3D landmark position $p^*$ by solving:
|
||||
|
||||
```math
|
||||
p^* = \underset{p}{\arg \min} \sum_i \| \Pi(C_i, p) - z_i \|_{\Sigma_i}^2
|
||||
```
|
||||
|
||||
where $\Pi(C_i, p)$ is the projection function of the $i$-th camera, and $\Sigma_i$ is the noise covariance for the $i$-th measurement (though typically a shared noise model $\Sigma$ is used). GTSAM uses the robust `gtsam.triangulateSafe` function internally, which returns a `gtsam.TriangulationResult` containing the point estimate $p^*$ and status information (valid, degenerate, behind camera, etc.).
|
||||
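A hedged sketch of this internal step, assuming the `triangulateSafe` overload declared in `gtsam/geometry/triangulation.h`; the calibration, poses, and landmark below are illustrative values only:

```cpp
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>
#include <iostream>

using namespace gtsam;

int main() {
  // Two cameras with a shared (illustrative) calibration, 1 m apart.
  Cal3_S2 K(500, 500, 0, 320, 240);
  CameraSet<PinholeCamera<Cal3_S2>> cameras;
  cameras.push_back(PinholeCamera<Cal3_S2>(Pose3(Rot3(), Point3(0, 0, 0)), K));
  cameras.push_back(PinholeCamera<Cal3_S2>(Pose3(Rot3(), Point3(1, 0, 0)), K));

  // Perfect measurements of an illustrative landmark 5 m in front of the rig.
  const Point3 landmark(0.5, 0.2, 5.0);
  Point2Vector measurements;
  for (const auto& camera : cameras) measurements.push_back(camera.project(landmark));

  TriangulationParameters params;  // rank tolerance, outlier thresholds, ...
  TriangulationResult result = triangulateSafe(cameras, measurements, params);
  if (result) std::cout << "p* = " << result->transpose() << "\n";
}
```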
|
||||
### 2. Error Function
|
||||
|
||||
The nonlinear error minimized by the smart factor is the sum of squared reprojection errors using the *implicitly triangulated* point $p^*$:
|
||||
|
||||
```math
|
||||
\text{error}( \{C_i\} ) = \frac{1}{2} \sum_i \| \Pi(C_i, p^*) - z_i \|_{\Sigma}^2
|
||||
```
|
||||
|
||||
Note that $p^*$ itself is a function of the cameras $\{C_i\}$.
|
||||
|
||||
### 3. Linearization
|
||||
|
||||
When a smart factor is linearized (e.g., during optimization), it computes a Gaussian factor that only involves the camera variables $\{\delta_{C_i}\}$. This is achieved by implicitly performing the Schur complement.
|
||||
|
||||
Let the standard projection factor error for camera $i$ be $e_i(\delta_{C_i}, \delta_p) = F_i \delta_{C_i} + E_i \delta_p - b_i$, where $F_i$ and $E_i$ are the Jacobians with respect to the camera and point, evaluated at the current estimates $C_i^0$ and the triangulated point $p^*$, and $b_i = z_i - \Pi(C_i^0, p^*)$ is the residual. The combined linearized system is:
|
||||
|
||||
```math
|
||||
\sum_i \| F_i \delta_{C_i} + E_i \delta_p - b_i \|_{\Sigma}^2 =
|
||||
\left\| \begin{bmatrix} F & E \end{bmatrix} \begin{bmatrix} \delta_C \\ \delta_p \end{bmatrix} - b \right\|_{\Sigma}^2
|
||||
```
|
||||
|
||||
where $F = \text{blkdiag}(F_i)$, $E = [E_1^T, ..., E_m^T]^T$, $b = [b_1^T, ..., b_m^T]^T$, and $\delta_C = [\delta_{C_1}^T, ..., \delta_{C_m}^T]^T$. The noise model $\Sigma$ is typically block-diagonal with identical blocks.
|
||||
|
||||
Marginalizing $\delta_p$ yields a Gaussian factor on $\delta_C$ with Hessian $H_S$ and information vector $\eta_S$:
|
||||
|
||||
```math
|
||||
H_S = F^T \Sigma^{-1} F - F^T \Sigma^{-1} E (E^T \Sigma^{-1} E)^{-1} E^T \Sigma^{-1} F
|
||||
```
|
||||
|
||||
```math
|
||||
\eta_S = F^T \Sigma^{-1} b - F^T \Sigma^{-1} E (E^T \Sigma^{-1} E)^{-1} E^T \Sigma^{-1} b
|
||||
```
|
||||
|
||||
Let $P = (E^T \Sigma^{-1} E)^{-1}$ be the point covariance and define the projection matrix $Q = \Sigma^{-1} (I - E P E^T \Sigma^{-1})$. Then the system simplifies to:
|
||||
|
||||
```math
|
||||
H_S = F^T Q F
|
||||
```
|
||||
|
||||
```math
|
||||
\eta_S = F^T Q b
|
||||
```
|
||||
|
||||
The smart factor's linearization computes or represents this Schur complement system $(H_S, \eta_S)$.
|
||||
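The simplification is easy to check numerically. The Eigen sketch below uses whitened measurements ($\Sigma = I$) and random $F$, $E$ to confirm that the explicit Schur complement matches $F^T Q F$:

```cpp
// Eigen sketch: with Sigma = I, verify F^T F - F^T E P E^T F == F^T Q F.
#include <Eigen/Dense>
#include <iostream>

int main() {
  const int m = 3;      // cameras observing the landmark
  const int z = 2 * m;  // stacked 2D measurement dimension
  const Eigen::MatrixXd F = Eigen::MatrixXd::Random(z, 6 * m);
  const Eigen::MatrixXd E = Eigen::MatrixXd::Random(z, 3);

  const Eigen::Matrix3d P = (E.transpose() * E).inverse();  // point covariance
  const Eigen::MatrixXd Q =
      Eigen::MatrixXd::Identity(z, z) - E * P * E.transpose();

  const Eigen::MatrixXd H_schur =
      F.transpose() * F - F.transpose() * E * P * E.transpose() * F;
  const Eigen::MatrixXd H_proj = F.transpose() * Q * F;
  std::cout << "max |diff| = " << (H_schur - H_proj).cwiseAbs().maxCoeff() << "\n";
}
```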
|
||||
## The Smart Factor Family
|
||||
|
||||
GTSAM provides several types of smart factors, primarily for projection measurements. They inherit common functionality from `gtsam.SmartFactorBase`.
|
||||
|
||||
### `gtsam.SmartProjectionFactor`
|
||||
|
||||
This factor is used when **both camera poses and camera calibration** are unknown and being optimized. It connects multiple `CAMERA` variables, where the `CAMERA` type (e.g., `gtsam.PinholeCameraCal3_S2`) encapsulates both the pose and the intrinsic calibration.
|
||||
|
||||
See also: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/SmartFactorBase.h>, [SmartProjectionFactor Notebook](SmartProjectionFactor.ipynb).
|
||||
|
||||
### `gtsam.SmartProjectionPoseFactor`
|
||||
|
||||
This factor is optimized for the common case where **camera calibration is known and fixed**, and only the camera **poses** (`gtsam.Pose3`) are being optimized. It assumes a single, shared calibration object (`gtsam.Cal3_S2`, `gtsam.Cal3Bundler`, etc.) for all measurements within the factor.
|
||||
|
||||
See also: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/SmartProjectionPoseFactor.h>, [SmartProjectionPoseFactor Notebook](SmartProjectionPoseFactor.ipynb).
|
||||
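A hedged construction sketch, assuming the `std::shared_ptr`-based constructor used by recent GTSAM versions (older releases use `boost::shared_ptr`); the keys, calibration, and pixel values are illustrative only:

```cpp
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/slam/SmartProjectionPoseFactor.h>

#include <memory>

using namespace gtsam;

int main() {
  // Shared, fixed calibration and an isotropic 1-pixel measurement noise.
  auto K = std::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto pixelNoise = noiseModel::Isotropic::Sigma(2, 1.0);

  SmartProjectionParams params;  // defaults: HESSIAN, IGNORE_DEGENERACY
  SmartProjectionPoseFactor<Cal3_S2> factor(pixelNoise, K, params);

  // One 2D observation of the (implicit) landmark per pose key (hypothetical
  // keys x0, x1).
  factor.add(Point2(321.0, 245.0), Symbol('x', 0));
  factor.add(Point2(318.0, 243.0), Symbol('x', 1));
}
```

The factor then constrains only the two pose keys; the landmark never receives a key of its own.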
|
||||
### `gtsam.SmartProjectionRigFactor`
|
||||
|
||||
This factor extends the concept to **multi-camera rigs** where the *relative* poses and calibrations of the cameras within the rig are fixed, but the **rig's body pose** (`gtsam.Pose3`) is being optimized. It allows multiple measurements from different cameras within the rig, potentially associated with the same body pose key.
|
||||
|
||||
**Note:** Due to implementation details, the `CAMERA` template parameter used with this factor should typically be `gtsam.PinholePose` rather than `gtsam.PinholeCamera`.
|
||||
|
||||
See also: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/SmartProjectionRigFactor.h>, [SmartProjectionRigFactor Notebook](SmartProjectionRigFactor.ipynb).
|
||||
|
||||
## Configuration
|
||||
|
||||
The behavior of smart factors is controlled by `gtsam.SmartProjectionParams`. Key parameters include (a construction sketch follows this list):
|
||||
|
||||
* `linearizationMode`: Selects which type of `GaussianFactor` is returned by `linearize`; the default is `HESSIAN` (see below).
|
||||
* `degeneracyMode`: Defines how to handle cases where triangulation fails or is ill-conditioned (e.g., collinear cameras). Options include ignoring it, returning a zero-information factor, or treating the point as being at infinity.
|
||||
* `triangulation`: Parameters passed to `triangulateSafe`, such as rank tolerance for SVD and outlier rejection thresholds.
|
||||
* `retriangulationThreshold`: If the camera poses change significantly between linearizations (measured by Frobenius norm), the point is re-triangulated.
|
||||
* `throwCheirality`/`verboseCheirality`: Control behavior when the triangulated point ends up behind one or more cameras.
|
||||
|
||||
See also: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/SmartFactorParams.h>, [SmartFactorParams Notebook](SmartProjectionParams.ipynb).
|
||||
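A hedged construction sketch; the enum values and setter below follow `SmartFactorParams.h` as linked above, but verify them against your GTSAM version:

```cpp
#include <gtsam/slam/SmartFactorParams.h>

using namespace gtsam;

int main() {
  // linearizationMode, degeneracyMode, throwCheirality, verboseCheirality
  SmartProjectionParams params(IMPLICIT_SCHUR, ZERO_ON_DEGENERACY,
                               /*throwCheirality=*/false,
                               /*verboseCheirality=*/true);
  params.setRankTolerance(1e-9);  // SVD rank tolerance used by triangulateSafe
}
```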
|
||||
## Linear Factor Representations
|
||||
|
||||
Depending on the `linearizationMode` specified in `gtsam.SmartProjectionParams`, the `linearize` method of a smart factor returns different types of `gtsam.GaussianFactor` objects, all representing the same underlying Schur complement system but optimized for different solvers:
|
||||
|
||||
### 1. `gtsam.RegularHessianFactor` (`HESSIAN` mode, default)
|
||||
|
||||
When a smart factor is linearized using the `HESSIAN` mode, it computes and returns a `gtsam.RegularHessianFactor`. In this specific context, this factor explicitly represents the dense **Schur complement** system obtained after marginalizing out the 3D landmark.
|
||||
|
||||
* **Defining Feature:** This factor requires that all involved camera variables have the *same, fixed dimension D* (e.g., $D=6$ for `gtsam.Pose3`), specified via a template parameter. This contrasts with the base `gtsam.HessianFactor` which handles variable dimensions.
|
||||
* **Stored Information:** It stores the blocks of the effective Hessian matrix $H_S$ (denoted internally as $G_{ij}$) and the effective information vector $\eta_S$ (denoted internally as $g_i$) that act *only* on the camera variables.
|
||||
* **Optimized Operators (Benefit for CG):** Crucially, this regularity allows `RegularHessianFactor` to provide highly optimized implementations for key linear algebra operations, especially the raw-memory (`double*`) version of `multiplyHessianAdd`. This operation computes the Hessian-vector product ($y \leftarrow y + \alpha H_S x$) extremely efficiently by leveraging `Eigen::Map` and fixed-size matrix operations, making it ideal for **iterative solvers like Conjugate Gradient (CG)** which rely heavily on this product. The base `HessianFactor`'s implementations are more general and typically involve more overhead.
|
||||
* **Formation Cost:** Creating this factor from a smart factor can still be computationally expensive (forming the Schur complement $H_S = F^T Q F$), especially for landmarks observed by many cameras.
|
||||
* **Solver Suitability:** While efficient for CG (using the `double*` methods), it is also suitable for **direct linear solvers** (like Cholesky or QR factorization) as it provides the explicit system matrix $H_S$.
|
||||
|
||||
Source: <https://github.com/borglab/gtsam/blob/develop/gtsam/linear/RegularHessianFactor.h>.
|
||||
|
||||
### 2. `gtsam.RegularImplicitSchurFactor` (`IMPLICIT_SCHUR` mode)
|
||||
|
||||
This factor *stores* the ingredients $F_i$, $E_i$, $P$, and $b_i$ (or rather, their whitened versions) instead of the final Hessian $H_S$. Its key feature is an efficient `multiplyHessianAdd` method that computes the Hessian-vector product $y \leftarrow y + \alpha H_S x$ *without ever forming* the potentially large $H_S$ matrix. It computes this via:
|
||||
|
||||
```math
|
||||
\begin{aligned}
v_i &= F_i x_i \\
w &= E^T \Sigma^{-1} v \\
d &= P w \\
u_i &= E_i d \\
y_i &\leftarrow y_i + \alpha F_i^T \Sigma^{-1} (v_i - u_i)
\end{aligned}
|
||||
```
|
||||
This is highly efficient for iterative linear solvers like Conjugate Gradient (CG), which primarily rely on Hessian-vector products.
|
||||
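The recipe is easy to reproduce outside GTSAM. This self-contained Eigen sketch (whitened case, $\Sigma = I$) applies $y \leftarrow y + \alpha H_S x$ without ever forming $H_S$:

```cpp
// Eigen sketch of the implicit Schur Hessian-vector product (Sigma = I).
#include <Eigen/Dense>
#include <iostream>
#include <vector>

int main() {
  const int m = 4;  // cameras observing the landmark
  std::vector<Eigen::Matrix<double, 2, 6>> F(m);
  std::vector<Eigen::Matrix<double, 2, 3>> E(m);
  for (int i = 0; i < m; ++i) { F[i].setRandom(); E[i].setRandom(); }

  Eigen::Matrix3d EtE = Eigen::Matrix3d::Zero();
  for (int i = 0; i < m; ++i) EtE += E[i].transpose() * E[i];
  const Eigen::Matrix3d P = EtE.inverse();  // point covariance

  Eigen::VectorXd x = Eigen::VectorXd::Random(6 * m);
  Eigen::VectorXd y = Eigen::VectorXd::Zero(6 * m);
  const double alpha = 1.0;

  // v_i = F_i x_i; w = sum_i E_i^T v_i; d = P w; y_i += alpha F_i^T (v_i - E_i d)
  std::vector<Eigen::Vector2d> v(m);
  Eigen::Vector3d w = Eigen::Vector3d::Zero();
  for (int i = 0; i < m; ++i) {
    v[i] = F[i] * x.segment<6>(6 * i);
    w += E[i].transpose() * v[i];
  }
  const Eigen::Vector3d d = P * w;
  for (int i = 0; i < m; ++i)
    y.segment<6>(6 * i) += alpha * F[i].transpose() * (v[i] - E[i] * d);

  std::cout << "||y|| = " << y.norm() << "\n";
}
```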
|
||||
Source: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/RegularImplicitSchurFactor.h>.
|
||||
|
||||
### 3. `gtsam.JacobianFactorQ` (`JACOBIAN_Q` mode)
|
||||
|
||||
This factor represents the Schur complement system as a Jacobian factor. It computes a projection matrix $Q_{proj} = \Sigma^{-1/2} (I - E P E^T \Sigma^{-1}) \Sigma^{-1/2}$ (where $\Sigma = \text{blkdiag}(\Sigma_i)$) and represents the error term $\| Q_{proj}^{1/2} (F \delta_C - b) \|^2$. It stores the projected Jacobians $A_i = Q_{proj, i}^{1/2} F_i$ and the projected right-hand side $b' = Q_{proj}^{1/2} b$.
|
||||
|
||||
Source: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/JacobianFactorQ.h>.
|
||||
|
||||
### 4. `gtsam.JacobianFactorSVD` (`JACOBIAN_SVD` mode)
|
||||
|
||||
This factor uses the "Nullspace Trick". It computes $E_{null}$, a matrix whose columns form an orthonormal basis for the left nullspace of $E$ (i.e., $E_{null}^T E = 0$). Multiplying the original linearized system by $E_{null}^T \Sigma^{-1}$ yields a smaller system involving only $\delta_C$:
|
||||
|
||||
```math
|
||||
\| E_{null}^T \Sigma^{-1} F \delta_C - E_{null}^T \Sigma^{-1} b \|^2_{(E_{null}^T \Sigma^{-1} E_{null})^{-1}}
|
||||
```
|
||||
|
||||
The factor stores the projected Jacobian $A = E_{null}^T \Sigma^{-1} F$, the projected right-hand side $b' = E_{null}^T \Sigma^{-1} b$, and an appropriate noise model derived from $(E_{null}^T \Sigma^{-1} E_{null})^{-1}$. This method is mathematically equivalent to the Schur complement but can offer computational advantages in certain scenarios.
|
||||
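An Eigen sketch of the trick in the whitened case ($\Sigma = I$): the basis $E_{null}$ is read off the full SVD of $E$, and the projected system involves only the camera block:

```cpp
// Eigen sketch of the nullspace trick: E_null spans the left nullspace of E.
#include <Eigen/Dense>
#include <iostream>

int main() {
  const int z = 8;  // 4 cameras x 2D measurements
  const Eigen::MatrixXd E = Eigen::MatrixXd::Random(z, 3);
  const Eigen::MatrixXd F = Eigen::MatrixXd::Random(z, 24);

  // Columns of U past rank(E) = 3 form an orthonormal left-nullspace basis.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(E, Eigen::ComputeFullU);
  const Eigen::MatrixXd E_null = svd.matrixU().rightCols(z - 3);

  std::cout << "||E_null^T E|| = " << (E_null.transpose() * E).norm() << "\n";  // ~0
  const Eigen::MatrixXd A = E_null.transpose() * F;  // camera-only Jacobian
  std::cout << "projected system: " << A.rows() << "x" << A.cols() << "\n";
}
```

Since $E_{null}$ is orthonormal, in the whitened case the derived noise model $(E_{null}^T E_{null})^{-1}$ reduces to the identity.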
|
||||
Source: <https://github.com/borglab/gtsam/blob/develop/gtsam/slam/JacobianFactorSVD.h>.
|
||||
|
||||
## Conclusion
|
||||
|
||||
Smart factors provide a powerful mechanism for efficiently handling landmark-based constraints in SLAM and SfM. By implicitly marginalizing landmarks, they reduce the size of the state space and enable the use of specialized linear factor representations like `gtsam.RegularImplicitSchurFactor`, which are highly effective when combined with iterative solvers like Conjugate Gradient. Understanding the underlying mathematical connection to the Schur complement and the different linearization options allows users to choose the most appropriate configuration for their specific problem and solver.
|
|
@ -30,7 +30,7 @@
|
|||
"id": "colab-button-cell-fls",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"<a href=\"https://colab.research.google.com/github/borglab/gtsam/blob/develop/python/examples/EKF_SLAM.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
"<a href=\"https://colab.research.google.com/github/borglab/gtsam/blob/develop/python/gtsam/examples/EKF_SLAM.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
@ -102,7 +102,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"execution_count": 3,
|
||||
"id": "params-code-fls",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
|
@ -145,7 +145,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"execution_count": 4,
|
||||
"id": "ground-truth-call-fls",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
|
@ -196,7 +196,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"execution_count": 5,
|
||||
"id": "smoother-init-code",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
|
@ -281,7 +281,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 6,
|
||||
"id": "smoother-loop-code",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
|
@ -295,7 +295,7 @@
|
|||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "65c6048ecb324a96ac367007900bb015",
|
||||
"model_id": "21976a68ac2846559c4b7b45e35d06e2",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
|
@ -402,7 +402,7 @@
|
|||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 7,
|
||||
"id": "plotly-animation-call-code-fls",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
|
@ -418,7 +418,7 @@
|
|||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "7a03e84472a1445ebfd1e59f4a5b76ff",
|
||||
"model_id": "a0452bf011b144be88c1c3c1a17465b5",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
|
@ -441,6 +441,7 @@
|
|||
"data": {
|
||||
"application/vnd.plotly.v1+json": {
|
||||
"config": {
|
||||
"displayModeBar": false,
|
||||
"plotlyServerURL": "https://plot.ly"
|
||||
},
|
||||
"data": [
|
||||
|
@ -14455,6 +14456,7 @@
|
|||
}
|
||||
],
|
||||
"layout": {
|
||||
"autosize": true,
|
||||
"height": 600,
|
||||
"hovermode": "closest",
|
||||
"images": [
|
||||
|
@ -14478,6 +14480,12 @@
|
|||
"x": 0.52,
|
||||
"y": 1
|
||||
},
|
||||
"margin": {
|
||||
"b": 50,
|
||||
"l": 0,
|
||||
"r": 0,
|
||||
"t": 50
|
||||
},
|
||||
"shapes": [
|
||||
{
|
||||
"fillcolor": "rgba(255,0,255,0.2)",
|
||||
|
@ -14491,7 +14499,7 @@
|
|||
"yref": "y"
|
||||
}
|
||||
],
|
||||
"showlegend": true,
|
||||
"showlegend": false,
|
||||
"sliders": [
|
||||
{
|
||||
"active": 0,
|
||||
|
@ -16337,7 +16345,7 @@
|
|||
"direction": "left",
|
||||
"pad": {
|
||||
"r": 10,
|
||||
"t": 87
|
||||
"t": 20
|
||||
},
|
||||
"showactive": false,
|
||||
"type": "buttons",
|
||||
|
@ -16347,7 +16355,6 @@
|
|||
"yanchor": "top"
|
||||
}
|
||||
],
|
||||
"width": 1000,
|
||||
"xaxis": {
|
||||
"constrain": "domain",
|
||||
"domain": [
|
||||
|
@ -16390,7 +16397,7 @@
|
|||
")\n",
|
||||
"\n",
|
||||
"print(\"Displaying animation...\")\n",
|
||||
"fig.show()"
|
||||
"fig.show(config={'displayModeBar': False})"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
|
|
@ -353,7 +353,7 @@ def configure_figure_layout(
|
|||
type="buttons",
|
||||
showactive=False,
|
||||
direction="left",
|
||||
pad={"r": 10, "t": 87},
|
||||
pad={"r": 10, "t": 20},
|
||||
x=plot_domain[0],
|
||||
xanchor="left",
|
||||
y=0,
|
||||
|
@ -401,20 +401,21 @@ def configure_figure_layout(
|
|||
scaleratio=1,
|
||||
domain=[0, 1],
|
||||
),
|
||||
width=1000,
|
||||
autosize=True,
|
||||
height=600,
|
||||
hovermode="closest",
|
||||
updatemenus=updatemenus,
|
||||
sliders=sliders,
|
||||
shapes=initial_shapes, # Initial shapes (frame 0)
|
||||
images=([initial_image] if initial_image else []), # Initial image (frame 0)
|
||||
showlegend=True, # Keep legend for clarity
|
||||
legend=dict(
|
||||
x=plot_domain[0],
|
||||
y=1,
|
||||
traceorder="normal", # Position legend
|
||||
bgcolor="rgba(255,255,255,0.5)",
|
||||
),
|
||||
showlegend=False,
|
||||
margin=dict(l=0, r=0, t=50, b=50),
|
||||
)
|
||||
|
||||
|
||||
|
|