From b4c70f2ef94a1a19de68e9f8a1edc13da66a2210 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sat, 3 Sep 2022 22:39:10 -0400
Subject: [PATCH] add code for simplified hybrid estimation

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp  |  59 ++++-
 gtsam/hybrid/tests/testHybridEstimation.cpp | 226 ++++++++++++++++++++
 2 files changed, 281 insertions(+), 4 deletions(-)
 create mode 100644 gtsam/hybrid/tests/testHybridEstimation.cpp

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index c031b9729..9dbbb0468 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -55,6 +55,52 @@ namespace gtsam {
 
 template class EliminateableFactorGraph<HybridGaussianFactorGraph>;
 
+/// Formatter for bit-packed keys of the form [ch1 | ch2 | link index |
+/// joint index | time], used for debug printing below.
+std::string GTDKeyFormatter(const Key &key) {
+  constexpr size_t kMax_uchar_ = std::numeric_limits<uint8_t>::max();
+  constexpr size_t key_bits = sizeof(gtsam::Key) * 8;
+  constexpr size_t ch1_bits = sizeof(uint8_t) * 8;
+  constexpr size_t ch2_bits = sizeof(uint8_t) * 8;
+  constexpr size_t link_bits = sizeof(uint8_t) * 8;
+  constexpr size_t joint_bits = sizeof(uint8_t) * 8;
+  constexpr size_t time_bits =
+      key_bits - ch1_bits - ch2_bits - link_bits - joint_bits;
+
+  constexpr gtsam::Key ch1_mask = gtsam::Key(kMax_uchar_)
+                                  << (key_bits - ch1_bits);
+  constexpr gtsam::Key ch2_mask = gtsam::Key(kMax_uchar_)
+                                  << (key_bits - ch1_bits - ch2_bits);
+  constexpr gtsam::Key link_mask = gtsam::Key(kMax_uchar_)
+                                   << (time_bits + joint_bits);
+  constexpr gtsam::Key joint_mask = gtsam::Key(kMax_uchar_) << time_bits;
+  constexpr gtsam::Key time_mask =
+      ~(ch1_mask | ch2_mask | link_mask | joint_mask);
+
+  uint8_t c1_, c2_, link_idx_, joint_idx_;
+  uint64_t t_;
+
+  c1_ = (uint8_t)((key & ch1_mask) >> (key_bits - ch1_bits));
+  c2_ = (uint8_t)((key & ch2_mask) >> (key_bits - ch1_bits - ch2_bits));
+  link_idx_ = (uint8_t)((key & link_mask) >> (time_bits + joint_bits));
+  joint_idx_ = (uint8_t)((key & joint_mask) >> time_bits);
+  t_ = key & time_mask;
+
+  std::string s = "";
+  if (c1_ != 0) {
+    s += c1_;
+  }
+  if (c2_ != 0) {
+    s += c2_;
+  }
+  if (link_idx_ != kMax_uchar_) {
+    s += "[" + std::to_string((int)(link_idx_)) + "]";
+  }
+  if (joint_idx_ != kMax_uchar_) {
+    s += "(" + std::to_string((int)(joint_idx_)) + ")";
+  }
+  s += std::to_string(t_);
+  return s;
+}
+
 /* ************************************************************************ */
 static GaussianMixtureFactor::Sum &addGaussian(
     GaussianMixtureFactor::Sum &sum, const GaussianFactor::shared_ptr &factor) {
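For intuition, GTDKeyFormatter above decodes a 64-bit key packed, from the most significant byte down, as [ch1: 8 | ch2: 8 | link: 8 | joint: 8 | time: 32]. The following sketch (not part of the patch) shows a matching encoder; the helper name MakeGTDKey is hypothetical, a stand-in for however such keys are actually constructed:

#include <cstdint>

// Hypothetical encoder for the layout GTDKeyFormatter decodes:
// [ch1: 8][ch2: 8][link: 8][joint: 8][time: 32], MSB first.
uint64_t MakeGTDKey(char c1, char c2, uint8_t link, uint8_t joint,
                    uint32_t time) {
  return (uint64_t(uint8_t(c1)) << 56) | (uint64_t(uint8_t(c2)) << 48) |
         (uint64_t(link) << 40) | (uint64_t(joint) << 32) | uint64_t(time);
}

// GTDKeyFormatter(MakeGTDKey('v', 0, 255, 3, 7)) would print "v(3)7":
// the zero second character and the all-ones link index are suppressed.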
@@ -213,10 +259,10 @@ hybridElimination(const HybridGaussianFactorGraph &factors,
       result = EliminatePreferCholesky(graph, frontalKeys);
 
       if (keysOfEliminated.empty()) {
-        keysOfEliminated =
-            result.first->keys();  // Initialize the keysOfEliminated to be the
+        // Initialize the keysOfEliminated to be the keys of the
+        // eliminated GaussianConditional
+        keysOfEliminated = result.first->keys();
       }
-      // keysOfEliminated of the GaussianConditional
       if (keysOfSeparator.empty()) {
         keysOfSeparator = result.second->keys();
       }
@@ -237,13 +283,18 @@ hybridElimination(const HybridGaussianFactorGraph &factors,
   // If there are no more continuous parents, then we should create here a
   // DiscreteFactor, with the error for each discrete choice.
+  // frontalKeys.print();
+  // separatorFactors.print(
+  //     "", GTDKeyFormatter,
+  //     [](const GaussianFactor::shared_ptr &factor) {
+  //       factor->print();
+  //       return "";
+  //     });
+  // std::cout << "=====================================" << std::endl;
   if (keysOfSeparator.empty()) {
-    VectorValues empty_values;
+    VectorValues empty_values;  // TODO(Varun): Shouldn't this be from the optimized leaf?
     auto factorError = [&](const GaussianFactor::shared_ptr &factor) {
       if (!factor) return 0.0;  // TODO(fan): does this make sense?
       return exp(-factor->error(empty_values));
     };
     DecisionTree<Key, double> fdt(separatorFactors, factorError);
+    std::cout << "\n\nFactor Decision Tree" << std::endl;
+    fdt.print("", GTDKeyFormatter,
+              [](double x) { return std::to_string(x); });
 
     auto discreteFactor =
         boost::make_shared<DecisionTreeFactor>(discreteSeparator, fdt);
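The key step in the new branch above is turning each leaf's Gaussian factor error into an unnormalized discrete potential via exp(-error), so low-error (well-fitting) modes receive high probability mass. A standalone sketch of that conversion; the error values here are made-up numbers for illustration, not values produced by the patch:

#include <cmath>
#include <iostream>

int main() {
  // Hypothetical factor errors for the two modes of one discrete key.
  double e_still = 0.01, e_moving = 8.0;
  // Same mapping as the factorError lambda above: error -> exp(-error).
  double p_still = std::exp(-e_still), p_moving = std::exp(-e_moving);
  // Normalizing gives the relative weight of each mode.
  double z = p_still + p_moving;
  std::cout << "P(still) = " << p_still / z    // ~0.9997
            << ", P(moving) = " << p_moving / z  // ~0.0003
            << std::endl;
}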
diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp
new file mode 100644
index 000000000..1aa3060ce
--- /dev/null
+++ b/gtsam/hybrid/tests/testHybridEstimation.cpp
@@ -0,0 +1,226 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file    testHybridEstimation.cpp
+ * @brief   Unit tests for Hybrid Estimation
+ * @author  Varun Agrawal
+ */
+
+#include <gtsam/discrete/DiscreteKey.h>
+#include <gtsam/hybrid/HybridBayesNet.h>
+#include <gtsam/hybrid/HybridGaussianFactorGraph.h>
+#include <gtsam/hybrid/HybridNonlinearFactorGraph.h>
+#include <gtsam/hybrid/HybridValues.h>
+#include <gtsam/hybrid/MixtureFactor.h>
+#include <gtsam/inference/Symbol.h>
+#include <gtsam/linear/NoiseModel.h>
+#include <gtsam/nonlinear/PriorFactor.h>
+#include <gtsam/nonlinear/Values.h>
+#include <gtsam/slam/BetweenFactor.h>
+
+// Include for test suite
+#include <CppUnitLite/TestHarness.h>
+
+using namespace std;
+using namespace gtsam;
+
+using noiseModel::Isotropic;
+using symbol_shorthand::M;
+using symbol_shorthand::X;
+
+/// A 1D robot that either stays still or moves between time steps.
+class Robot {
+  size_t K_;
+  DiscreteKeys modes_;
+  HybridNonlinearFactorGraph nonlinearFactorGraph_;
+  HybridGaussianFactorGraph linearizedFactorGraph_;
+  Values linearizationPoint_;
+
+ public:
+  Robot(size_t K, std::vector<double> measurements) : K_(K) {
+    // Create DiscreteKeys for K binary modes.
+    for (size_t k = 0; k < K; k++) {
+      modes_.emplace_back(M(k), 2);
+    }
+
+    ////// Create hybrid factor graph.
+    // Add measurement factors.
+    auto measurement_noise = noiseModel::Isotropic::Sigma(1, 1.0);
+    for (size_t k = 0; k < K; k++) {
+      nonlinearFactorGraph_.emplace_nonlinear<PriorFactor<double>>(
+          X(k), measurements.at(k), measurement_noise);
+    }
+
+    // Two noise models, where the moving mode has a much larger sigma.
+    auto still_noise_model = noiseModel::Isotropic::Sigma(1, 1e-2);
+    auto moving_noise_model = noiseModel::Isotropic::Sigma(1, 1e2);
+
+    // Add "motion models".
+    // The idea is that the robot has more "freedom" (i.e. higher covariance)
+    // when moving.
+    using MotionModel = BetweenFactor<double>;
+
+    for (size_t k = 1; k < K; k++) {
+      KeyVector keys = {X(k - 1), X(k)};
+      DiscreteKeys dkeys{modes_[k - 1]};
+      auto still = boost::make_shared<MotionModel>(X(k - 1), X(k), 0.0,
+                                                   still_noise_model),
+           moving = boost::make_shared<MotionModel>(X(k - 1), X(k), 0.0,
+                                                    moving_noise_model);
+      std::vector<boost::shared_ptr<MotionModel>> components = {still, moving};
+      nonlinearFactorGraph_.emplace_hybrid<MixtureFactor>(keys, dkeys,
+                                                          components);
+    }
+
+    // Create the linearization point.
+    for (size_t k = 0; k < K; k++) {
+      linearizationPoint_.insert(X(k), static_cast<double>(k + 1));
+    }
+
+    linearizedFactorGraph_ =
+        nonlinearFactorGraph_.linearize(linearizationPoint_);
+  }
+
+  void print() const {
+    nonlinearFactorGraph_.print();
+    linearizationPoint_.print();
+    linearizedFactorGraph_.print();
+  }
+
+  HybridValues optimize() const {
+    Ordering hybridOrdering = linearizedFactorGraph_.getHybridOrdering();
+    HybridBayesNet::shared_ptr hybridBayesNet =
+        linearizedFactorGraph_.eliminateSequential(hybridOrdering);
+
+    HybridValues delta = hybridBayesNet->optimize();
+    return delta;
+  }
+};
+
+/* ****************************************************************************/
+/**
+ * Test whether hybrid mixture components that differ only in covariance
+ * are sufficient for mode selection. The "moving" mode's sigma is set to
+ * 1e2 while the "still" mode's sigma is 1e-2.
+ */
+TEST(Estimation, StillRobot) {
+  size_t K = 2;
+  vector<double> measurements = {0, 0};
+
+  Robot robot(K, measurements);
+  // robot.print();
+
+  HybridValues delta = robot.optimize();
+
+  delta.continuous().print("delta update:");
+
+  if (delta.discrete()[M(0)] == 0) {
+    std::cout << "The robot is stationary!" << std::endl;
+  } else {
+    std::cout << "The robot has moved!" << std::endl;
+  }
+  EXPECT_LONGS_EQUAL(0, delta.discrete()[M(0)]);
+}
+
+TEST(Estimation, MovingRobot) {
+  size_t K = 2;
+  vector<double> measurements = {0, 2};
+
+  Robot robot(K, measurements);
+  // robot.print();
+
+  HybridValues delta = robot.optimize();
+
+  delta.continuous().print("delta update:");
+
+  if (delta.discrete()[M(0)] == 0) {
+    std::cout << "The robot is stationary!" << std::endl;
+  } else {
+    std::cout << "The robot has moved!" << std::endl;
+  }
+  EXPECT_LONGS_EQUAL(1, delta.discrete()[M(0)]);
+}
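A back-of-the-envelope check of why MovingRobot should select mode 1: the measurements {0, 2} imply a relative displacement of r = 2 between X(0) and X(1), and with a zero-mean motion model each mode's error is roughly 0.5 * (r / sigma)^2, the usual Gaussian factor error, ignoring normalization constants. The sketch below is just that arithmetic on the patch's sigmas, nothing more:

#include <cmath>
#include <iostream>

int main() {
  double r = 2.0;  // displacement implied by measurements {0, 2}
  double e_still = 0.5 * std::pow(r / 1e-2, 2);  // 20000: "still" cannot explain it
  double e_moving = 0.5 * std::pow(r / 1e2, 2);  // 0.0002: "moving" absorbs it
  std::cout << "still error " << e_still << " vs moving error " << e_moving
            << std::endl;
}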
+/// A robot with a single leg.
+class SingleLeg {
+  size_t K_;
+  DiscreteKeys modes_;
+  HybridNonlinearFactorGraph nonlinearFactorGraph_;
+  HybridGaussianFactorGraph linearizedFactorGraph_;
+  Values linearizationPoint_;
+
+ public:
+  SingleLeg(size_t K, std::vector<double> measurements) : K_(K) {
+    // Create DiscreteKeys for K binary modes.
+    for (size_t k = 0; k < K; k++) {
+      modes_.emplace_back(M(k), 2);
+    }
+
+    ////// Create hybrid factor graph.
+    // Add measurement factors.
+    auto measurement_noise = noiseModel::Isotropic::Sigma(1, 1.0);
+    for (size_t k = 0; k < K; k++) {
+      nonlinearFactorGraph_.emplace_nonlinear<PriorFactor<double>>(
+          X(k), measurements.at(k), measurement_noise);
+    }
+
+    // Two noise models, where the moving mode has a much larger sigma.
+    auto still_noise_model = noiseModel::Isotropic::Sigma(1, 1e-2);
+    auto moving_noise_model = noiseModel::Isotropic::Sigma(1, 1e2);
+
+    // Add "motion models".
+    // The idea is that the robot has more "freedom" (i.e. higher covariance)
+    // when moving.
+    using MotionModel = BetweenFactor<double>;
+
+    for (size_t k = 1; k < K; k++) {
+      KeyVector keys = {X(k - 1), X(k)};
+      DiscreteKeys dkeys{modes_[k - 1]};
+      auto still = boost::make_shared<MotionModel>(X(k - 1), X(k), 0.0,
+                                                   still_noise_model),
+           moving = boost::make_shared<MotionModel>(X(k - 1), X(k), 0.0,
+                                                    moving_noise_model);
+      std::vector<boost::shared_ptr<MotionModel>> components = {still, moving};
+      nonlinearFactorGraph_.emplace_hybrid<MixtureFactor>(keys, dkeys,
+                                                          components);
+    }
+
+    // Create the linearization point.
+    for (size_t k = 0; k < K; k++) {
+      linearizationPoint_.insert(X(k), static_cast<double>(k + 1));
+    }
+
+    linearizedFactorGraph_ =
+        nonlinearFactorGraph_.linearize(linearizationPoint_);
+  }
+
+  void print() const {
+    nonlinearFactorGraph_.print();
+    linearizationPoint_.print();
+    linearizedFactorGraph_.print();
+  }
+
+  HybridValues optimize() const {
+    Ordering hybridOrdering = linearizedFactorGraph_.getHybridOrdering();
+    HybridBayesNet::shared_ptr hybridBayesNet =
+        linearizedFactorGraph_.eliminateSequential(hybridOrdering);
+
+    HybridValues delta = hybridBayesNet->optimize();
+    return delta;
+  }
+};
+
+/* ************************************************************************* */
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
+/* ************************************************************************* */
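Note that SingleLeg duplicates Robot verbatim and no test in this patch exercises it yet. A sketch of a minimal smoke test that could accompany it, assuming the same constructor and optimize()/discrete() API as the Robot tests above (this test is not part of the patch):

TEST(Estimation, SingleLegStill) {
  size_t K = 2;
  vector<double> measurements = {0, 0};

  SingleLeg robot(K, measurements);
  HybridValues delta = robot.optimize();

  // With identical measurements, the tight "still" mode (0) should win.
  EXPECT_LONGS_EQUAL(0, delta.discrete()[M(0)]);
}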