Template on size_t, Darshan's updates to cleanup comments, default coordinate to exponential, separate filter and demo specific functions, rename stateAction to operator *, fix brace initialization
parent 51e20eca58
commit 17bf752576

@@ -0,0 +1,259 @@
/**
 * @file ABC.h
 * @brief Core components for Attitude-Bias-Calibration systems
 *
 * This file contains fundamental components and utilities for the ABC system,
 * based on the paper "Overcoming Bias: Equivariant Filter Design for Biased
 * Attitude Estimation with Online Calibration" by Fornasier et al.
 * Authors: Darshan Rajasekaran & Jennifer Oum
 */

#ifndef ABC_H
#define ABC_H

#include <gtsam/base/Matrix.h>
#include <gtsam/base/Vector.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/Unit3.h>

#include <array>
#include <cmath>
#include <stdexcept>

namespace gtsam {
namespace abc_eqf_lib {

using namespace std;
using namespace gtsam;

//========================================================================
// Utility Functions
//========================================================================
/// Check if a vector is a unit vector
bool checkNorm(const Vector3& x, double tol = 1e-3);

/// Check if vector contains NaN values
bool hasNaN(const Vector3& vec);

/// Create a block diagonal matrix from two matrices
Matrix blockDiag(const Matrix& A, const Matrix& B);

/// Repeat a block matrix n times along the diagonal
Matrix repBlock(const Matrix& A, int n);

// Utility Functions Implementation

/**
 * @brief Verifies that a vector has unit norm within tolerance
 * @param x 3D vector
 * @param tol optional tolerance
 * @return true if the vector norm is approximately 1 (NaN norms also pass)
 */
bool checkNorm(const Vector3& x, double tol) {
  return std::abs(x.norm() - 1) < tol || std::isnan(x.norm());
}

/**
 * @brief Checks if the input vector has any NaNs
 * @param vec A 3D vector
 * @return true if a NaN is present, false otherwise
 */
bool hasNaN(const Vector3& vec) {
  return std::isnan(vec[0]) || std::isnan(vec[1]) || std::isnan(vec[2]);
}

/**
 * @brief Creates a block diagonal matrix from two input matrices
 * @param A Matrix A
 * @param B Matrix B
 * @return A single consolidated matrix with A in the top left and B in the
 * bottom right
 */
Matrix blockDiag(const Matrix& A, const Matrix& B) {
  if (A.size() == 0) {
    return B;
  } else if (B.size() == 0) {
    return A;
  } else {
    Matrix result(A.rows() + B.rows(), A.cols() + B.cols());
    result.setZero();
    result.block(0, 0, A.rows(), A.cols()) = A;
    result.block(A.rows(), A.cols(), B.rows(), B.cols()) = B;
    return result;
  }
}

/**
 * @brief Creates a block diagonal matrix by repeating a matrix n times
 * @param A A matrix
 * @param n Number of repetitions
 * @return Block diagonal matrix with A repeated n times
 */
Matrix repBlock(const Matrix& A, int n) {
  if (n <= 0) return Matrix();

  Matrix result = A;
  for (int i = 1; i < n; i++) {
    result = blockDiag(result, A);
  }
  return result;
}
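
// Illustrative usage sketch (demo only, not used by the filter itself): how
// blockDiag and repBlock compose matrices. The dimensions in the comments
// follow directly from the definitions above.
inline void exampleBlockDiagUsage() {
  Matrix A = Matrix::Identity(2, 2);
  Matrix B = 2.0 * Matrix::Identity(3, 3);
  Matrix AB = blockDiag(A, B);  // 5x5: A in the top-left, B in the bottom-right
  Matrix AAA = repBlock(A, 3);  // 6x6: A repeated 3 times along the diagonal
  (void)AB;
  (void)AAA;
}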
//========================================================================
// Core Data Types
//========================================================================

/// Input struct for the Biased Attitude System
struct Input {
  Vector3 w;              /// Angular velocity (3-vector)
  Matrix Sigma;           /// Noise covariance (6x6 matrix)
  static Input random();  /// Random input
  Matrix3 W() const {     /// Return w as a skew-symmetric matrix
    return Rot3::Hat(w);
  }
};

/// Measurement struct
struct Measurement {
  Unit3 y;           /// Measurement direction in sensor frame
  Unit3 d;           /// Known direction in global frame
  Matrix3 Sigma;     /// Covariance matrix of the measurement
  int cal_idx = -1;  /// Calibration index (-1 for calibrated sensor)
};
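
// Illustrative sketch (demo only): building an Input and a Measurement by
// aggregate initialization, as the EqF demo does. cal_idx = -1 marks a sensor
// whose extrinsics are already calibrated; an index >= 0 ties the measurement
// to the corresponding online calibration state. The numbers are made up.
inline Measurement exampleMeasurement() {
  Input u{Vector3(0.01, -0.02, 0.03), Matrix::Identity(6, 6) * 1e-4};
  Matrix3 Omega = u.W();  // skew-symmetric form of the angular velocity
  (void)Omega;
  return Measurement{Unit3(Vector3(0.0, 0.0, 1.0)),
                     Unit3(Vector3(0.0, 0.0, 1.0)),
                     Matrix3::Identity() * 1e-3, -1};
}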
/// State class representing the state of the Biased Attitude System
template <size_t N>
class State {
 public:
  Rot3 R;                 // Attitude rotation matrix R
  Vector3 b;              // Gyroscope bias b
  std::array<Rot3, N> S;  // Sensor calibrations S

  /// Constructor
  State(const Rot3& R = Rot3::Identity(), const Vector3& b = Vector3::Zero(),
        const std::array<Rot3, N>& S = std::array<Rot3, N>{})
      : R(R), b(b), S(S) {}

  /// Identity element
  static State identity() {
    std::array<Rot3, N> S_id{};
    S_id.fill(Rot3::Identity());
    return State(Rot3::Identity(), Vector3::Zero(), S_id);
  }

  /**
   * Compute the local coordinates of another state relative to this state.
   * @param other The other state
   * @return Local coordinates in the tangent space
   */
  Vector localCoordinates(const State<N>& other) const {
    Vector eps(6 + 3 * N);

    // First 3 elements - attitude
    eps.head<3>() = Rot3::Logmap(R.between(other.R));

    // Next 3 elements - bias
    eps.segment<3>(3) = other.b - b;

    // Remaining elements - calibrations
    for (size_t i = 0; i < N; i++) {
      eps.segment<3>(6 + 3 * i) = Rot3::Logmap(S[i].between(other.S[i]));
    }

    return eps;
  }

  /**
   * Retract from the tangent space back to the manifold
   * @param v Vector in the tangent space
   * @return New state
   */
  State retract(const Vector& v) const {
    if (v.size() != static_cast<Eigen::Index>(6 + 3 * N)) {
      throw std::invalid_argument(
          "Vector size does not match state dimensions");
    }
    Rot3 newR = R * Rot3::Expmap(v.head<3>());
    Vector3 newB = b + v.segment<3>(3);
    std::array<Rot3, N> newS;
    for (size_t i = 0; i < N; i++) {
      newS[i] = S[i] * Rot3::Expmap(v.segment<3>(6 + 3 * i));
    }
    return State(newR, newB, newS);
  }
};
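
// Illustrative sketch (demo only): retract() and localCoordinates() invert
// each other, so mapping a tangent vector onto the manifold and reading it
// back recovers the same coordinates (up to numerical precision).
inline void exampleStateRoundTrip() {
  State<1> X = State<1>::identity();
  Vector eps(9);  // 3 attitude + 3 bias + 3 calibration coordinates
  eps << 0.01, -0.02, 0.03, 0.001, 0.002, 0.003, 0.0, 0.01, -0.01;
  State<1> Y = X.retract(eps);
  Vector recovered = X.localCoordinates(Y);  // approximately equal to eps
  (void)recovered;
}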
//========================================================================
// Symmetry Group
//========================================================================

/**
 * Symmetry group (SO(3) |x so(3)) x SO(3) x ... x SO(3)
 * Each element of the B list is associated with a calibration state
 */
template <size_t N>
struct G {
  Rot3 A;                 /// First SO(3) element
  Matrix3 a;              /// so(3) element (skew-symmetric matrix)
  std::array<Rot3, N> B;  /// List of SO(3) elements for calibration

  /// Initialize the symmetry group G
  G(const Rot3& A = Rot3::Identity(), const Matrix3& a = Matrix3::Zero(),
    const std::array<Rot3, N>& B = std::array<Rot3, N>{})
      : A(A), a(a), B(B) {}

  /// Group multiplication
  G operator*(const G<N>& other) const {
    std::array<Rot3, N> newB;
    for (size_t i = 0; i < N; i++) {
      newB[i] = B[i] * other.B[i];
    }
    return G(A * other.A, a + Rot3::Hat(A.matrix() * Rot3::Vee(other.a)), newB);
  }

  /// Group inverse
  G inv() const {
    Matrix3 Ainv = A.inverse().matrix();
    std::array<Rot3, N> Binv;
    for (size_t i = 0; i < N; i++) {
      Binv[i] = B[i].inverse();
    }
    return G(A.inverse(), -Rot3::Hat(Ainv * Rot3::Vee(a)), Binv);
  }

  /// Identity element
  static G identity(int n) {
    std::array<Rot3, N> B;
    B.fill(Rot3::Identity());
    return G(Rot3::Identity(), Matrix3::Zero(), B);
  }

  /// Exponential map of the tangent space elements to the group
  static G exp(const Vector& x) {
    if (x.size() != static_cast<Eigen::Index>(6 + 3 * N)) {
      throw std::invalid_argument("Vector size mismatch for group exponential");
    }
    Rot3 A = Rot3::Expmap(x.head<3>());
    Vector3 a_vee = Rot3::ExpmapDerivative(-x.head<3>()) * x.segment<3>(3);
    Matrix3 a = Rot3::Hat(a_vee);
    std::array<Rot3, N> B;
    for (size_t i = 0; i < N; i++) {
      B[i] = Rot3::Expmap(x.segment<3>(6 + 3 * i));
    }
    return G(A, a, B);
  }
};
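
// Illustrative sketch (demo only): exp() maps a tangent vector into the group,
// and composing an element with its inverse yields the identity element
// (A = I, a = 0, B[0] = I) under the multiplication rule defined above.
inline void exampleGroupOps() {
  Vector x(9);  // 3 for A, 3 for a, 3 for the single calibration factor B[0]
  x << 0.1, 0.0, -0.1, 0.01, 0.02, 0.03, 0.05, 0.0, 0.0;
  G<1> g = G<1>::exp(x);
  G<1> e = g * g.inv();  // identity up to numerical precision
  (void)e;
}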
}  // namespace abc_eqf_lib

template <size_t N>
struct traits<abc_eqf_lib::State<N>>
    : internal::LieGroupTraits<abc_eqf_lib::State<N>> {};

template <size_t N>
struct traits<abc_eqf_lib::G<N>>
    : internal::LieGroupTraits<abc_eqf_lib::G<N>> {};

}  // namespace gtsam

#endif  // ABC_H
File diff suppressed because it is too large

@@ -3,17 +3,33 @@
 * @brief Demonstration of the full Attitude-Bias-Calibration Equivariant Filter
 *
 * This demo shows the Equivariant Filter (EqF) for attitude estimation
 * with both gyroscope bias and sensor extrinsic calibration, based on the
 * paper: "Overcoming Bias: Equivariant Filter Design for Biased Attitude
 * Estimation with Online Calibration" by Fornasier et al.
 * Authors: Darshan Rajasekaran & Jennifer Oum
 */

#include "ABC_EQF.h"

#include <gtsam/slam/dataset.h>  // findExampleDataFile

#include <chrono>
#include <fstream>
#include <iostream>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

// Use namespaces and aliases for convenience
using namespace gtsam;
using namespace gtsam::abc_eqf_lib;

constexpr size_t N = 1;  // Number of calibration states
using M = abc_eqf_lib::State<N>;
using Group = abc_eqf_lib::G<N>;
using EqFilter = abc_eqf_lib::EqF<N>;

/// Data structure for ground-truth, input and output data
struct Data {
  M xi;                        /// Ground-truth state
  Input u;                     /// Input measurements
  std::vector<Measurement> y;  /// Output measurements
  int n_meas;                  /// Number of measurements
  double t;                    /// Time
  double dt;                   /// Time step
};

//========================================================================
// Data Processing Functions
@@ -31,402 +47,398 @@ using namespace gtsam;
 * - std_w_x, std_w_y, std_w_z: Angular velocity measurement standard deviations
 * - std_b_x, std_b_y, std_b_z: Bias process noise standard deviations
 * - y_x_0, y_y_0, y_z_0, y_x_1, y_y_1, y_z_1: Direction measurements
 * - std_y_x_0, std_y_y_0, std_y_z_0, std_y_x_1, std_y_y_1, std_y_z_1: Direction
 *   measurement standard deviations
 * - d_x_0, d_y_0, d_z_0, d_x_1, d_y_1, d_z_1: Reference directions
 *
 * @param filename Path to the CSV file
 * @param startRow First row to load (default: 0)
 * @param maxRows Maximum number of rows to load (default: all)
 * @param downsample Downsample factor (default: 1, which means no downsampling)
 * @return Vector of Data objects
 */
std::vector<Data> loadDataFromCSV(const std::string& filename, int startRow = 0,
                                  int maxRows = -1, int downsample = 1);

/// Process data with EqF and print summary results
void processDataWithEqF(EqFilter& filter, const std::vector<Data>& data_list,
                        int printInterval = 10);

//========================================================================
// Data Processing Functions Implementation
//========================================================================
/**
 * @brief Loads the test data from the CSV file
 * @param filename Path to the CSV file
 * @param startRow First row to load (0 by default)
 * @param maxRows Maximum number of rows to load (all rows by default)
 * @param downsample Downsample factor (1 by default)
 * @return A list of Data objects
 */
std::vector<Data> loadDataFromCSV(const std::string& filename, int startRow,
                                  int maxRows, int downsample) {
  std::vector<Data> data_list;
  std::ifstream file(filename);
  std::cout << "Loading data from " << filename << "..." << std::flush;

  if (!file.is_open()) {
    throw std::runtime_error("Failed to open file: " + filename);
  }

  std::string line;
  int lineNumber = 0;
  int rowCount = 0;
  int errorCount = 0;
  double prevTime = 0.0;

  // Skip header
  std::getline(file, line);
  lineNumber++;

  // Skip to startRow
  while (lineNumber < startRow && std::getline(file, line)) {
    lineNumber++;
  }

  // Read data
  while (std::getline(file, line) && (maxRows == -1 || rowCount < maxRows)) {
    lineNumber++;

    // Apply downsampling
    if ((lineNumber - startRow - 1) % downsample != 0) {
      continue;
    }

    std::istringstream ss(line);
    std::string token;
    std::vector<double> values;

    // Parse line into values
    while (std::getline(ss, token, ',')) {
      try {
        values.push_back(std::stod(token));
      } catch (const std::exception& e) {
        errorCount++;
        values.push_back(0.0);  // Use default value
      }
    }

    // Check if we have enough values
    if (values.size() < 39) {
      errorCount++;
      continue;
    }

    // Extract values
    double t = values[0];
    double dt = (rowCount == 0) ? 0.0 : t - prevTime;
    prevTime = t;

    // Create ground truth state
    Quaternion quat(values[1], values[2], values[3], values[4]);  // w, x, y, z
    Rot3 R = Rot3(quat);

    Vector3 b(values[5], values[6], values[7]);

    Quaternion calQuat(values[8], values[9], values[10],
                       values[11]);  // w, x, y, z
    std::array<Rot3, N> S = {Rot3(calQuat)};

    M xi(R, b, S);

    // Create input
    Vector3 w(values[12], values[13], values[14]);

    // Create input covariance matrix (6x6)
    // First 3x3 block for angular velocity, second 3x3 block for bias process
    // noise
    Matrix inputCov = Matrix::Zero(6, 6);
    inputCov(0, 0) = values[15] * values[15];  // std_w_x^2
    inputCov(1, 1) = values[16] * values[16];  // std_w_y^2
    inputCov(2, 2) = values[17] * values[17];  // std_w_z^2
    inputCov(3, 3) = values[18] * values[18];  // std_b_x^2
    inputCov(4, 4) = values[19] * values[19];  // std_b_y^2
    inputCov(5, 5) = values[20] * values[20];  // std_b_z^2

    Input u{w, inputCov};

    // Create measurements
    std::vector<Measurement> measurements;

    // First measurement (uncalibrated sensor, uses calibration state 0)
    Vector3 y0(values[21], values[22], values[23]);
    Vector3 d0(values[33], values[34], values[35]);

    // Normalize vectors if needed
    if (std::abs(y0.norm() - 1.0) > 1e-5) y0.normalize();
    if (std::abs(d0.norm() - 1.0) > 1e-5) d0.normalize();

    // Measurement covariance
    Matrix3 covY0 = Matrix3::Zero();
    covY0(0, 0) = values[27] * values[27];  // std_y_x_0^2
    covY0(1, 1) = values[28] * values[28];  // std_y_y_0^2
    covY0(2, 2) = values[29] * values[29];  // std_y_z_0^2

    // Create measurement
    measurements.push_back(Measurement{Unit3(y0), Unit3(d0), covY0, 0});

    // Second measurement (calibrated sensor, cal_idx = -1)
    Vector3 y1(values[24], values[25], values[26]);
    Vector3 d1(values[36], values[37], values[38]);

    // Normalize vectors if needed
    if (std::abs(y1.norm() - 1.0) > 1e-5) y1.normalize();
    if (std::abs(d1.norm() - 1.0) > 1e-5) d1.normalize();

    // Measurement covariance
    Matrix3 covY1 = Matrix3::Zero();
    covY1(0, 0) = values[30] * values[30];  // std_y_x_1^2
    covY1(1, 1) = values[31] * values[31];  // std_y_y_1^2
    covY1(2, 2) = values[32] * values[32];  // std_y_z_1^2

    // Create measurement
    measurements.push_back(Measurement{Unit3(y1), Unit3(d1), covY1, -1});

    // Create Data object and add to list
    data_list.push_back(Data{xi, u, measurements, 2, t, dt});

    rowCount++;

    // Show loading progress every 1000 rows
    if (rowCount % 1000 == 0) {
      std::cout << "." << std::flush;
    }
  }

  std::cout << " Done!" << std::endl;
  std::cout << "Loaded " << data_list.size() << " data points";

  if (errorCount > 0) {
    std::cout << " (" << errorCount << " errors encountered)";
  }

  std::cout << std::endl;

  return data_list;
}
/**
 * @brief Takes in the data, runs the EqF on it, and reports the results
 * @param filter Initialized EqF filter
 * @param data_list Vector of Data objects to process
 * @param printInterval Progress indicator interval
 * Prints performance statistics such as the average errors, using the Rot3
 * between, Logmap and rpy functions.
 */
void processDataWithEqF(EqFilter& filter, const std::vector<Data>& data_list,
                        int printInterval) {
  if (data_list.empty()) {
    std::cerr << "No data to process" << std::endl;
    return;
  }

  std::cout << "Processing " << data_list.size() << " data points with EqF..."
            << std::endl;

  // Track performance metrics
  std::vector<double> att_errors;
  std::vector<double> bias_errors;
  std::vector<double> cal_errors;

  // Track time for performance measurement
  auto start = std::chrono::high_resolution_clock::now();

  int totalMeasurements = 0;
  int validMeasurements = 0;

  // Define constant for converting radians to degrees
  const double RAD_TO_DEG = 180.0 / M_PI;

  // Print a progress indicator
  int progressStep = data_list.size() / 10;  // 10 progress updates
  if (progressStep < 1) progressStep = 1;

  std::cout << "Progress: ";

  for (size_t i = 0; i < data_list.size(); i++) {
    const Data& data = data_list[i];

    // Propagate filter with current input and time step
    filter.propagation(data.u, data.dt);

    // Process all measurements
    for (const auto& y : data.y) {
      totalMeasurements++;

      // Skip invalid measurements
      Vector3 y_vec = y.y.unitVector();
      Vector3 d_vec = y.d.unitVector();
      if (std::isnan(y_vec[0]) || std::isnan(y_vec[1]) ||
          std::isnan(y_vec[2]) || std::isnan(d_vec[0]) ||
          std::isnan(d_vec[1]) || std::isnan(d_vec[2])) {
        continue;
      }

      try {
        filter.update(y);
        validMeasurements++;
      } catch (const std::exception& e) {
        std::cerr << "Error updating at t=" << data.t << ": " << e.what()
                  << std::endl;
      }
    }

    // Get current state estimate
    M estimate = filter.stateEstimate();

    // Calculate errors
    Vector3 att_error = Rot3::Logmap(data.xi.R.between(estimate.R));
    Vector3 bias_error = estimate.b - data.xi.b;
    Vector3 cal_error = Vector3::Zero();
    if (!data.xi.S.empty() && !estimate.S.empty()) {
      cal_error = Rot3::Logmap(data.xi.S[0].between(estimate.S[0]));
    }

    // Store errors
    att_errors.push_back(att_error.norm());
    bias_errors.push_back(bias_error.norm());
    cal_errors.push_back(cal_error.norm());

    // Show progress dots
    if (i % progressStep == 0) {
      std::cout << "." << std::flush;
    }
  }

  std::cout << " Done!" << std::endl;

  auto end = std::chrono::high_resolution_clock::now();
  std::chrono::duration<double> elapsed = end - start;

  // Calculate average errors
  double avg_att_error = 0.0;
  double avg_bias_error = 0.0;
  double avg_cal_error = 0.0;

  if (!att_errors.empty()) {
    avg_att_error = std::accumulate(att_errors.begin(), att_errors.end(), 0.0) /
                    att_errors.size();
    avg_bias_error =
        std::accumulate(bias_errors.begin(), bias_errors.end(), 0.0) /
        bias_errors.size();
    avg_cal_error = std::accumulate(cal_errors.begin(), cal_errors.end(), 0.0) /
                    cal_errors.size();
  }

  // Calculate final errors from last data point
  const Data& final_data = data_list.back();
  M final_estimate = filter.stateEstimate();
  Vector3 final_att_error =
      Rot3::Logmap(final_data.xi.R.between(final_estimate.R));
  Vector3 final_bias_error = final_estimate.b - final_data.xi.b;
  Vector3 final_cal_error = Vector3::Zero();
  if (!final_data.xi.S.empty() && !final_estimate.S.empty()) {
    final_cal_error =
        Rot3::Logmap(final_data.xi.S[0].between(final_estimate.S[0]));
  }

  // Print summary statistics
  std::cout << "\n=== Filter Performance Summary ===" << std::endl;
  std::cout << "Processing time: " << elapsed.count() << " seconds"
            << std::endl;
  std::cout << "Processed measurements: " << totalMeasurements
            << " (valid: " << validMeasurements << ")" << std::endl;

  // Average errors
  std::cout << "\n-- Average Errors --" << std::endl;
  std::cout << "Attitude: " << (avg_att_error * RAD_TO_DEG) << "°" << std::endl;
  std::cout << "Bias: " << avg_bias_error << std::endl;
  std::cout << "Calibration: " << (avg_cal_error * RAD_TO_DEG) << "°"
            << std::endl;

  // Final errors
  std::cout << "\n-- Final Errors --" << std::endl;
  std::cout << "Attitude: " << (final_att_error.norm() * RAD_TO_DEG) << "°"
            << std::endl;
  std::cout << "Bias: " << final_bias_error.norm() << std::endl;
  std::cout << "Calibration: " << (final_cal_error.norm() * RAD_TO_DEG) << "°"
            << std::endl;

  // Print a brief comparison of final estimate vs ground truth
  std::cout << "\n-- Final State vs Ground Truth --" << std::endl;
  std::cout << "Attitude (RPY) - Estimate: "
            << (final_estimate.R.rpy() * RAD_TO_DEG).transpose()
            << "° | Truth: " << (final_data.xi.R.rpy() * RAD_TO_DEG).transpose()
            << "°" << std::endl;
  std::cout << "Bias - Estimate: " << final_estimate.b.transpose()
            << " | Truth: " << final_data.xi.b.transpose() << std::endl;

  if (!final_estimate.S.empty() && !final_data.xi.S.empty()) {
    std::cout << "Calibration (RPY) - Estimate: "
              << (final_estimate.S[0].rpy() * RAD_TO_DEG).transpose()
              << "° | Truth: "
              << (final_data.xi.S[0].rpy() * RAD_TO_DEG).transpose() << "°"
              << std::endl;
  }
}

/**
 * Main function for the EqF demo
 * @param argc Number of arguments
 * @param argv Array of arguments
 * @return Exit code
 */
int main(int argc, char* argv[]) {
  std::cout << "ABC-EqF: Attitude-Bias-Calibration Equivariant Filter Demo"
            << std::endl;
  std::cout << "=============================================================="
            << std::endl;

  try {
    // Parse command line options
    std::string csvFilePath;
    int maxRows = -1;    // Process all rows by default
    int downsample = 1;  // No downsampling by default

    if (argc > 1) {
      csvFilePath = argv[1];
    } else {
      // Try to find the EqFdata file in the GTSAM examples directory
      try {
        csvFilePath = findExampleDataFile("EqFdata.csv");
      } catch (const std::exception& e) {
        std::cerr << "Error: Could not find EqFdata.csv" << std::endl;
        std::cerr << "Usage: " << argv[0]
                  << " [csv_file_path] [max_rows] [downsample]" << std::endl;
        return 1;
      }
    }

    // Optional command line parameters
    if (argc > 2) {
      maxRows = std::stoi(argv[2]);
    }

    if (argc > 3) {
      downsample = std::stoi(argv[3]);
    }

    // Load data from CSV file
    std::vector<Data> data =
        loadDataFromCSV(csvFilePath, 0, maxRows, downsample);

    if (data.empty()) {
      std::cerr << "No data available to process. Exiting." << std::endl;
      return 1;
    }

    // Initialize the EqF filter with one calibration state
    int n_sensors = 2;

    // Initial covariance - larger values allow faster convergence
    Matrix initialSigma = Matrix::Identity(6 + 3 * N, 6 + 3 * N);
    initialSigma.diagonal().head<3>() =
        Vector3::Constant(0.1);  // Attitude uncertainty
    initialSigma.diagonal().segment<3>(3) =
        Vector3::Constant(0.01);  // Bias uncertainty
    initialSigma.diagonal().tail<3>() =
        Vector3::Constant(0.1);  // Calibration uncertainty

    // Create filter
    EqFilter filter(initialSigma, n_sensors);

    // Process data
    processDataWithEqF(filter, data);

  } catch (const std::exception& e) {
    std::cerr << "Error: " << e.what() << std::endl;
    return 1;
  }

  std::cout << "\nEqF demonstration completed successfully." << std::endl;
  return 0;
}
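
// Example invocation (binary name and data path are illustrative): passing a
// max-row count of 5000 and a downsample factor of 2 loads at most 5000 rows
// and keeps every 2nd row before filtering.
//   ./ABC_EQF_Demo EqFdata.csv 5000 2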