From dc2c98383738b977b986caacd7468b85dc5d6890 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 11:53:29 -0700
Subject: [PATCH 001/204] Document and fix bug in modes

---
 gtsam/hybrid/tests/Switching.h | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h
index 82876fd2c..547facce9 100644
--- a/gtsam/hybrid/tests/Switching.h
+++ b/gtsam/hybrid/tests/Switching.h
@@ -114,11 +114,11 @@ inline std::pair<KeyVector, std::vector<int>> makeBinaryOrdering(
   return {new_order, levels};
 }
 
-/* ***************************************************************************
- */
+/* ****************************************************************************/
 using MotionModel = BetweenFactor<double>;
 
 // Test fixture with switching network.
+/// ϕ(X(0)) .. ϕ(X(k),X(k+1)) .. ϕ(X(k);z_k) .. ϕ(M(0)) .. ϕ(M(k),M(k+1))
 struct Switching {
   size_t K;
   DiscreteKeys modes;
@@ -140,8 +140,8 @@ struct Switching {
       : K(K) {
     using noiseModel::Isotropic;
 
-    // Create DiscreteKeys for binary K modes.
-    for (size_t k = 0; k < K; k++) {
+    // Create DiscreteKeys for K-1 binary modes.
+    for (size_t k = 0; k < K - 1; k++) {
       modes.emplace_back(M(k), 2);
     }
 
@@ -153,25 +153,26 @@ struct Switching {
     }
 
     // Create hybrid factor graph.
-    // Add a prior on X(0).
+
+    // Add a prior ϕ(X(0)) on X(0).
     nonlinearFactorGraph.emplace_shared<PriorFactor<double>>(
         X(0), measurements.at(0), Isotropic::Sigma(1, prior_sigma));
 
-    // Add "motion models".
+    // Add "motion models" ϕ(X(k),X(k+1)).
     for (size_t k = 0; k < K - 1; k++) {
       auto motion_models = motionModels(k, between_sigma);
       nonlinearFactorGraph.emplace_shared<HybridNonlinearFactor>(
          modes[k], motion_models);
     }
 
-    // Add measurement factors
+    // Add measurement factors ϕ(X(k);z_k).
     auto measurement_noise = Isotropic::Sigma(1, prior_sigma);
     for (size_t k = 1; k < K; k++) {
       nonlinearFactorGraph.emplace_shared<PriorFactor<double>>(
           X(k), measurements.at(k), measurement_noise);
     }
 
-    // Add "mode chain"
+    // Add "mode chain" ϕ(M(0)) ϕ(M(0),M(1)) ... ϕ(M(K-3),M(K-2))
     addModeChain(&nonlinearFactorGraph, discrete_transition_prob);
 
     // Create the linearization point.
@@ -179,8 +180,6 @@ struct Switching {
     for (size_t k = 0; k < K; k++) {
       linearizationPoint.insert(X(k), static_cast<double>(k + 1));
     }
 
-    // The ground truth is robot moving forward
-    // and one less than the linearization point
     linearizedFactorGraph = *nonlinearFactorGraph.linearize(linearizationPoint);
   }
 
@@ -196,7 +195,7 @@ struct Switching {
   }
 
   /**
-   * @brief Add "mode chain" to HybridNonlinearFactorGraph from M(0) to M(K-1).
+   * @brief Add "mode chain" to HybridNonlinearFactorGraph from M(0) to M(K-2).
    * E.g. if K=4, we want M0, M1 and M2.
    *
    * @param fg The factor graph to which the mode chain is added.
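Aside: the off-by-one this patch fixes is easy to state outside the fixture. A chain with K
continuous states X(0)..X(K-1) has only K-1 transitions, hence K-1 binary modes
M(0)..M(K-2). A minimal standalone sketch of that invariant follows (a hypothetical test,
not part of the patch; it assumes only GTSAM's DiscreteKey machinery and the M symbol
shorthand used in Switching.h):

    #include <gtsam/discrete/DiscreteKey.h>
    #include <gtsam/inference/Symbol.h>

    #include <cassert>

    using namespace gtsam;
    using symbol_shorthand::M;

    int main() {
      const size_t K = 4;  // four continuous states X(0)..X(3)
      DiscreteKeys modes;
      for (size_t k = 0; k < K - 1; k++) {  // the fix: K-1 modes, not K
        modes.emplace_back(M(k), 2);        // one binary mode per transition
      }
      assert(modes.size() == K - 1);  // M(0), M(1), M(2), matching the docstring
      return 0;
    }
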
From 28a2cd347514a79c3cc9c6044fa451bd959832eb Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 13:25:34 -0700
Subject: [PATCH 002/204] Check invariants

---
 gtsam/hybrid/tests/testHybridGaussianConditional.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
index 02163df9e..803d42f03 100644
--- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
@@ -168,6 +168,9 @@ TEST(HybridGaussianConditional, ContinuousParents) {
   // Check that the continuous parent keys are correct:
   EXPECT(continuousParentKeys.size() == 1);
   EXPECT(continuousParentKeys[0] == X(0));
+
+  EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv0));
+  EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv1));
 }
 
 /* ************************************************************************* */

From 8675dc62df1277f84f6b689a2afbd64df5fb6b5e Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 15:41:17 -0700
Subject: [PATCH 003/204] Throw if sampling conditions not satisfied

---
 gtsam/discrete/DiscreteConditional.cpp | 18 +++++++++++++-----
 gtsam/discrete/DiscreteConditional.h   |  2 +-
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp
index 3f0c9f511..5ab0c59ec 100644
--- a/gtsam/discrete/DiscreteConditional.cpp
+++ b/gtsam/discrete/DiscreteConditional.cpp
@@ -259,8 +259,18 @@ size_t DiscreteConditional::argmax(const DiscreteValues& parentsValues) const {
 
 /* ************************************************************************** */
 void DiscreteConditional::sampleInPlace(DiscreteValues* values) const {
-  assert(nrFrontals() == 1);
-  Key j = (firstFrontalKey());
+  // throw if more than one frontal:
+  if (nrFrontals() != 1) {
+    throw std::invalid_argument(
+        "DiscreteConditional::sampleInPlace can only be called on single "
+        "variable conditionals");
+  }
+  Key j = firstFrontalKey();
+  // throw if values already contains j:
+  if (values->count(j) > 0) {
+    throw std::invalid_argument(
+        "DiscreteConditional::sampleInPlace: values already contains j");
+  }
   size_t sampled = sample(*values);  // Sample variable given parents
   (*values)[j] = sampled;            // store result in partial solution
 }
@@ -467,9 +477,7 @@ double DiscreteConditional::evaluate(const HybridValues& x) const {
 }
 
 /* ************************************************************************* */
-double DiscreteConditional::negLogConstant() const {
-  return 0.0;
-}
+double DiscreteConditional::negLogConstant() const { return 0.0; }
 
 /* ************************************************************************* */
diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h
index e16100d0a..f59e29285 100644
--- a/gtsam/discrete/DiscreteConditional.h
+++ b/gtsam/discrete/DiscreteConditional.h
@@ -168,7 +168,7 @@ class GTSAM_EXPORT DiscreteConditional
     static_cast<const BaseConditional*>(this)->print(s, formatter);
   }
 
-  /// Evaluate, just look up in AlgebraicDecisonTree
+  /// Evaluate, just look up in AlgebraicDecisionTree
   double evaluate(const DiscreteValues& values) const {
     return ADT::operator()(values);
   }

From 80a4cd1bfca60ce28667f000dce397435a5eaeda Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 15:42:02 -0700
Subject: [PATCH 004/204] Only sample if not provided

---
 gtsam/discrete/DiscreteBayesNet.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/gtsam/discrete/DiscreteBayesNet.cpp b/gtsam/discrete/DiscreteBayesNet.cpp
index 1c5c81e45..56265b0a4 100644
--- a/gtsam/discrete/DiscreteBayesNet.cpp
+++ b/gtsam/discrete/DiscreteBayesNet.cpp
@@ -58,7 +58,12 @@ DiscreteValues DiscreteBayesNet::sample(DiscreteValues result) const {
   // sample each node in turn in topological sort order (parents first)
   for (auto it = std::make_reverse_iterator(end());
        it != std::make_reverse_iterator(begin()); ++it) {
-    (*it)->sampleInPlace(&result);
+    const DiscreteConditional::shared_ptr& conditional = *it;
+    // Sample the conditional only if value for j not already in result
+    const Key j = conditional->firstFrontalKey();
+    if (result.count(j) == 0) {
+      conditional->sampleInPlace(&result);
+    }
   }
   return result;
 }

From 846c7a1a99b7f573dd7f6ec23c9d91c2e63e7f9a Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 22:56:16 -0700
Subject: [PATCH 005/204] Make testable

---
 gtsam/hybrid/HybridGaussianFactorGraph.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h
index 923f48e38..7e3aac663 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.h
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.h
@@ -231,4 +231,9 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
   GaussianFactorGraph operator()(const DiscreteValues& assignment) const;
 };
 
+// traits
+template <>
+struct traits<HybridGaussianFactorGraph>
+    : public Testable<HybridGaussianFactorGraph> {};
+
 }  // namespace gtsam

From 21171b3a9a59926d2bdaf7545f6b9bc56565f62a Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 23:57:37 -0700
Subject: [PATCH 006/204] Fix equality

---
 gtsam/hybrid/HybridGaussianConditional.cpp | 9 ++++-----
 gtsam/hybrid/HybridGaussianFactor.cpp      | 7 +++----
 2 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 1712e06a9..fb89f72fc 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -192,11 +192,10 @@ bool HybridGaussianConditional::equals(const HybridFactor &lf,
 
   // Check the base and the factors:
   return BaseFactor::equals(*e, tol) &&
-         conditionals_.equals(e->conditionals_,
-                              [tol](const GaussianConditional::shared_ptr &f1,
-                                    const GaussianConditional::shared_ptr &f2) {
-                                return f1->equals(*(f2), tol);
-                              });
+         conditionals_.equals(
+             e->conditionals_, [tol](const auto &f1, const auto &f2) {
+               return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol));
+             });
 }
 
 /* *******************************************************************************/
diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp
index 7f8e808bf..6a9fc5c35 100644
--- a/gtsam/hybrid/HybridGaussianFactor.cpp
+++ b/gtsam/hybrid/HybridGaussianFactor.cpp
@@ -154,10 +154,9 @@ bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const {
 
   // Check the base and the factors:
   return Base::equals(*e, tol) &&
-         factors_.equals(e->factors_,
-                         [tol](const sharedFactor &f1, const sharedFactor &f2) {
-                           return f1->equals(*f2, tol);
-                         });
+         factors_.equals(e->factors_, [tol](const auto &f1, const auto &f2) {
+           return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol));
+         });
 }
 
 /* *******************************************************************************/

From d042359a997949255a4ddd2e34e1d7d01bd8eba7 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 23:57:48 -0700
Subject: [PATCH 007/204] choose method

---
 gtsam/hybrid/HybridGaussianConditional.cpp | 2 +-
 gtsam/hybrid/HybridGaussianConditional.h   | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index fb89f72fc..1db13e95b 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -168,7 +168,7 @@ size_t HybridGaussianConditional::nrComponents() const {
 }
 
 /* *******************************************************************************/
-GaussianConditional::shared_ptr HybridGaussianConditional::operator()(
+GaussianConditional::shared_ptr HybridGaussianConditional::choose(
     const DiscreteValues &discreteValues) const {
   auto &ptr = conditionals_(discreteValues);
   if (!ptr) return nullptr;
diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index 827b7f309..68c63e7bd 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -159,9 +159,15 @@ class GTSAM_EXPORT HybridGaussianConditional
   /// @{
 
   /// @brief Return the conditional Gaussian for the given discrete assignment.
-  GaussianConditional::shared_ptr operator()(
+  GaussianConditional::shared_ptr choose(
       const DiscreteValues &discreteValues) const;
 
+  /// @brief Syntactic sugar for choose.
+  GaussianConditional::shared_ptr operator()(
+      const DiscreteValues &discreteValues) const {
+    return choose(discreteValues);
+  }
+
   /// Returns the total number of continuous components
   size_t nrComponents() const;

From 5b909e3c2918a74228d5ee3ad50f7d4afe13cbd6 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 23:58:21 -0700
Subject: [PATCH 008/204] Clarify choose method

---
 gtsam/hybrid/HybridBayesNet.cpp | 2 +-
 gtsam/hybrid/HybridBayesNet.h   | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp
index 6e96afb25..3c77e3f9a 100644
--- a/gtsam/hybrid/HybridBayesNet.cpp
+++ b/gtsam/hybrid/HybridBayesNet.cpp
@@ -206,7 +206,7 @@ GaussianBayesNet HybridBayesNet::choose(
   for (auto &&conditional : *this) {
     if (auto gm = conditional->asHybrid()) {
       // If conditional is hybrid, select based on assignment.
-      gbn.push_back((*gm)(assignment));
+      gbn.push_back(gm->choose(assignment));
     } else if (auto gc = conditional->asGaussian()) {
       // If continuous only, add Gaussian conditional.
       gbn.push_back(gc);
diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h
index 22a43f3bd..62688e8b2 100644
--- a/gtsam/hybrid/HybridBayesNet.h
+++ b/gtsam/hybrid/HybridBayesNet.h
@@ -127,6 +127,8 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet<HybridConditional> {
    * @brief Get the Gaussian Bayes Net which corresponds to a specific discrete
    * value assignment.
    *
+   * @note Any pure discrete factors are ignored.
+   *
    * @param assignment The discrete value assignment for the discrete keys.
   * @return GaussianBayesNet
   */

From 12349b9201d2fbdce6e245d8acd0da5acf85f24a Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Mon, 30 Sep 2024 00:32:06 -0700
Subject: [PATCH 009/204] Helper method can be static

---
 gtsam/hybrid/HybridGaussianFactor.cpp | 17 ++++++++---------
 gtsam/hybrid/HybridGaussianFactor.h   |  4 ----
 2 files changed, 8 insertions(+), 13 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp
index 6a9fc5c35..b04db4977 100644
--- a/gtsam/hybrid/HybridGaussianFactor.cpp
+++ b/gtsam/hybrid/HybridGaussianFactor.cpp
@@ -212,16 +212,15 @@ GaussianFactorGraphTree HybridGaussianFactor::asGaussianFactorGraphTree()
 }
 
 /* *******************************************************************************/
-double HybridGaussianFactor::potentiallyPrunedComponentError(
-    const sharedFactor &gf, const VectorValues &values) const {
+/// Helper method to compute the error of a component.
+static double PotentiallyPrunedComponentError(
+    const GaussianFactor::shared_ptr &gf, const VectorValues &values) {
   // Check if valid pointer
   if (gf) {
     return gf->error(values);
   } else {
-    // If not valid, pointer, it means this component was pruned,
-    // so we return maximum error.
-    // This way the negative exponential will give
-    // a probability value close to 0.0.
+    // If nullptr this component was pruned, so we return maximum error. This
+    // way the negative exponential will give a probability value close to 0.0.
     return std::numeric_limits<double>::max();
   }
 }
@@ -230,8 +229,8 @@ AlgebraicDecisionTree<Key> HybridGaussianFactor::errorTree(
     const VectorValues &continuousValues) const {
   // functor to convert from sharedFactor to double error value.
-  auto errorFunc = [this, &continuousValues](const sharedFactor &gf) {
-    return this->potentiallyPrunedComponentError(gf, continuousValues);
+  auto errorFunc = [&continuousValues](const sharedFactor &gf) {
+    return PotentiallyPrunedComponentError(gf, continuousValues);
   };
   DecisionTree<Key, double> error_tree(factors_, errorFunc);
   return error_tree;
@@ -241,7 +240,7 @@ AlgebraicDecisionTree<Key> HybridGaussianFactor::errorTree(
 double HybridGaussianFactor::error(const HybridValues &values) const {
   // Directly index to get the component, no need to build the whole tree.
   const sharedFactor gf = factors_(values.discrete());
-  return potentiallyPrunedComponentError(gf, values.continuous());
+  return PotentiallyPrunedComponentError(gf, values.continuous());
 }
 
 }  // namespace gtsam
diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h
index f23d065b6..e5a575409 100644
--- a/gtsam/hybrid/HybridGaussianFactor.h
+++ b/gtsam/hybrid/HybridGaussianFactor.h
@@ -189,10 +189,6 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor {
    */
   static Factors augment(const FactorValuePairs &factors);
 
-  /// Helper method to compute the error of a component.
-  double potentiallyPrunedComponentError(
-      const sharedFactor &gf, const VectorValues &continuousValues) const;
-
   /// Helper struct to assist private constructor below.
   struct ConstructorHelper;

From 44fb786b7aac9227424c785465e98dda037f18f6 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Mon, 30 Sep 2024 01:38:06 -0700
Subject: [PATCH 010/204] Much more comprehensive tests

---
 gtsam/hybrid/tests/testHybridBayesNet.cpp | 118 +++++++++++++++++++---
 1 file changed, 102 insertions(+), 16 deletions(-)

diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp
index b555f6bd9..79979ac83 100644
--- a/gtsam/hybrid/tests/testHybridBayesNet.cpp
+++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp
@@ -62,32 +62,117 @@ TEST(HybridBayesNet, Add) {
 }
 
 /* ****************************************************************************/
-// Test evaluate for a pure discrete Bayes net P(Asia).
+// Test API for a pure discrete Bayes net P(Asia).
 TEST(HybridBayesNet, EvaluatePureDiscrete) {
   HybridBayesNet bayesNet;
-  bayesNet.emplace_shared<DiscreteConditional>(Asia, "4/6");
-  HybridValues values;
-  values.insert(asiaKey, 0);
-  EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(values), 1e-9);
+  const auto pAsia = std::make_shared<DiscreteConditional>(Asia, "4/6");
+  bayesNet.push_back(pAsia);
+  HybridValues zero{{}, {{asiaKey, 0}}}, one{{}, {{asiaKey, 1}}};
+
+  // choose
+  GaussianBayesNet empty;
+  EXPECT(assert_equal(empty, bayesNet.choose(zero.discrete()), 1e-9));
+
+  // evaluate
+  EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(0.4, bayesNet(zero), 1e-9);
+
+  // optimize
+  EXPECT(assert_equal(one, bayesNet.optimize()));
+  EXPECT(assert_equal(VectorValues{}, bayesNet.optimize(one.discrete())));
+
+  // sample
+  std::mt19937_64 rng(42);
+  EXPECT(assert_equal(zero, bayesNet.sample(&rng)));
+  EXPECT(assert_equal(one, bayesNet.sample(one, &rng)));
+  EXPECT(assert_equal(zero, bayesNet.sample(zero, &rng)));
+
+  // error
+  EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9);
+
+  // logProbability
+  EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9);
+
+  // toFactorGraph
+  HybridGaussianFactorGraph expectedFG{pAsia}, fg = bayesNet.toFactorGraph({});
+  EXPECT(assert_equal(expectedFG, fg));
+
+  // prune, imperative :-(
+  EXPECT(assert_equal(bayesNet, bayesNet.prune(2)));
+  EXPECT_LONGS_EQUAL(1, bayesNet.prune(1).at(0)->size());
 }
 
 /* ****************************************************************************/
 // Test creation of a tiny hybrid Bayes net.
 TEST(HybridBayesNet, Tiny) {
-  auto bn = tiny::createHybridBayesNet();
-  EXPECT_LONGS_EQUAL(3, bn.size());
+  auto bayesNet = tiny::createHybridBayesNet();  // P(z|x,mode)P(x)P(mode)
+  EXPECT_LONGS_EQUAL(3, bayesNet.size());
 
   const VectorValues vv{{Z(0), Vector1(5.0)}, {X(0), Vector1(5.0)}};
-  auto fg = bn.toFactorGraph(vv);
+  HybridValues zero{vv, {{M(0), 0}}}, one{vv, {{M(0), 1}}};
+
+  // Check Invariants for components
+  HybridGaussianConditional::shared_ptr hgc = bayesNet.at(0)->asHybrid();
+  GaussianConditional::shared_ptr gc0 = hgc->choose(zero.discrete()),
+                                  gc1 = hgc->choose(one.discrete());
+  GaussianConditional::shared_ptr px = bayesNet.at(1)->asGaussian();
+  GaussianConditional::CheckInvariants(*gc0, vv);
+  GaussianConditional::CheckInvariants(*gc1, vv);
+  GaussianConditional::CheckInvariants(*px, vv);
+  HybridGaussianConditional::CheckInvariants(*hgc, zero);
+  HybridGaussianConditional::CheckInvariants(*hgc, one);
+
+  // choose
+  GaussianBayesNet expectedChosen;
+  expectedChosen.push_back(gc0);
+  expectedChosen.push_back(px);
+  auto chosen0 = bayesNet.choose(zero.discrete());
+  auto chosen1 = bayesNet.choose(one.discrete());
+  EXPECT(assert_equal(expectedChosen, chosen0, 1e-9));
+
+  // logProbability
+  const double logP0 = chosen0.logProbability(vv) + log(0.4);  // 0.4 is prior
+  const double logP1 = chosen1.logProbability(vv) + log(0.6);  // 0.6 is prior
+  EXPECT_DOUBLES_EQUAL(logP0, bayesNet.logProbability(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(logP1, bayesNet.logProbability(one), 1e-9);
+
+  // evaluate
+  EXPECT_DOUBLES_EQUAL(exp(logP0), bayesNet.evaluate(zero), 1e-9);
+
+  // optimize
+  EXPECT(assert_equal(one, bayesNet.optimize()));
+  EXPECT(assert_equal(chosen0.optimize(), bayesNet.optimize(zero.discrete())));
+
+  // sample
+  std::mt19937_64 rng(42);
+  EXPECT(assert_equal({{M(0), 1}}, bayesNet.sample(&rng).discrete()));
+
+  // error
+  const double error0 = chosen0.error(vv) + gc0->negLogConstant() -
+                        px->negLogConstant() - log(0.4);
+  const double error1 = chosen1.error(vv) + gc1->negLogConstant() -
+                        px->negLogConstant() - log(0.6);
+  EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9);
+  EXPECT_DOUBLES_EQUAL(error0 + logP0, error1 + logP1, 1e-9);
+
+  // toFactorGraph
+  auto fg = bayesNet.toFactorGraph({{Z(0), Vector1(5.0)}});
   EXPECT_LONGS_EQUAL(3, fg.size());
 
   // Check that the ratio of probPrime to evaluate is the same for all modes.
   std::vector<double> ratio(2);
-  for (size_t mode : {0, 1}) {
-    const HybridValues hv{vv, {{M(0), mode}}};
-    ratio[mode] = std::exp(-fg.error(hv)) / bn.evaluate(hv);
-  }
+  ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero);
+  ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one);
   EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8);
+
+  // prune, imperative :-(
+  auto pruned = bayesNet.prune(1);
+  EXPECT_LONGS_EQUAL(1, pruned.at(0)->asHybrid()->nrComponents());
+  EXPECT(!pruned.equals(bayesNet));
+
 }
 
 /* ****************************************************************************/
@@ -223,12 +308,15 @@ TEST(HybridBayesNet, Optimize) {
 
 /* ****************************************************************************/
 // Test Bayes net error
 TEST(HybridBayesNet, Pruning) {
+  // Create switching network with three continuous variables and two discrete:
+  // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x0;z0) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1)
   Switching s(3);
 
   HybridBayesNet::shared_ptr posterior =
       s.linearizedFactorGraph.eliminateSequential();
   EXPECT_LONGS_EQUAL(5, posterior->size());
 
+  // Optimize
   HybridValues delta = posterior->optimize();
   auto actualTree = posterior->evaluate(delta.continuous());
@@ -254,7 +342,6 @@ TEST(HybridBayesNet, Pruning) {
   logProbability += posterior->at(0)->asHybrid()->logProbability(hybridValues);
   logProbability += posterior->at(1)->asHybrid()->logProbability(hybridValues);
   logProbability += posterior->at(2)->asHybrid()->logProbability(hybridValues);
-  // NOTE(dellaert): the discrete errors were not added in logProbability tree!
   logProbability +=
       posterior->at(3)->asDiscrete()->logProbability(hybridValues);
   logProbability +=
@@ -316,10 +403,9 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) {
 #endif
 
   // regression
-  DiscreteKeys dkeys{{M(0), 2}, {M(1), 2}, {M(2), 2}};
   DecisionTreeFactor::ADT potentials(
-      dkeys, std::vector<double>{0, 0, 0, 0.505145423, 0, 1, 0, 0.494854577});
-  DiscreteConditional expected_discrete_conditionals(1, dkeys, potentials);
+      s.modes, std::vector<double>{0, 0, 0, 0.505145423, 0, 1, 0, 0.494854577});
+  DiscreteConditional expected_discrete_conditionals(1, s.modes, potentials);
 
   // Prune!
   posterior->prune(maxNrLeaves);

From 3cd816341ccbc2b0b3e617d9723431c1c4775ec9 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Mon, 30 Sep 2024 12:49:29 -0700
Subject: [PATCH 011/204] refactor printErrors

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 92 ++++++++--------------
 1 file changed, 34 insertions(+), 58 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index c107aa8a8..8a2a7fd15 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -74,6 +74,32 @@ const Ordering HybridOrdering(const HybridGaussianFactorGraph &graph) {
       index, KeyVector(discrete_keys.begin(), discrete_keys.end()), true);
 }
 
+/* ************************************************************************ */
+static void printFactor(const std::shared_ptr<Factor> &factor,
+                        const DiscreteValues &assignment,
+                        const KeyFormatter &keyFormatter) {
+  if (auto hgf = std::dynamic_pointer_cast<HybridGaussianFactor>(factor)) {
+    hgf->operator()(assignment)
+        ->print("HybridGaussianFactor, component:", keyFormatter);
+  } else if (auto gf = std::dynamic_pointer_cast<GaussianFactor>(factor)) {
+    factor->print("GaussianFactor:\n", keyFormatter);
+  } else if (auto df = std::dynamic_pointer_cast<DiscreteFactor>(factor)) {
+    factor->print("DiscreteFactor:\n", keyFormatter);
+  } else if (auto hc = std::dynamic_pointer_cast<HybridConditional>(factor)) {
+    if (hc->isContinuous()) {
+      factor->print("GaussianConditional:\n", keyFormatter);
+    } else if (hc->isDiscrete()) {
+      factor->print("DiscreteConditional:\n", keyFormatter);
+    } else {
+      hc->asHybrid()
+          ->choose(assignment)
+          ->print("HybridConditional, component:\n", keyFormatter);
+    }
+  } else {
+    factor->print("Unknown factor type\n", keyFormatter);
+  }
+}
+
 /* ************************************************************************ */
 void HybridGaussianFactorGraph::printErrors(
     const HybridValues &values, const std::string &str,
     const KeyFormatter &keyFormatter,
     const std::function<bool(const Factor * /*factor*/, double /*error*/,
                              size_t /*index*/)> &printCondition) const {
   std::cout << str << "size: " << size() << std::endl << std::endl;
 
-  std::stringstream ss;
-
   for (size_t i = 0; i < factors_.size(); i++) {
     auto &&factor = factors_[i];
-    std::cout << "Factor " << i << ": ";
-
-    // Clear the stringstream
-    ss.str(std::string());
-
-    if (auto hgf = std::dynamic_pointer_cast<HybridGaussianFactor>(factor)) {
-      if (factor == nullptr) {
-        std::cout << "nullptr"
-                  << "\n";
-      } else {
-        hgf->operator()(values.discrete())->print(ss.str(), keyFormatter);
-        std::cout << "error = " << factor->error(values) << std::endl;
-      }
-    } else if (auto hc = std::dynamic_pointer_cast<HybridConditional>(factor)) {
-      if (factor == nullptr) {
-        std::cout << "nullptr"
-                  << "\n";
-      } else {
-        if (hc->isContinuous()) {
-          factor->print(ss.str(), keyFormatter);
-          std::cout << "error = " << hc->asGaussian()->error(values) << "\n";
-        } else if (hc->isDiscrete()) {
-          factor->print(ss.str(), keyFormatter);
-          std::cout << "error = " << hc->asDiscrete()->error(values.discrete())
-                    << "\n";
-        } else {
-          // Is hybrid
-          auto conditionalComponent =
-              hc->asHybrid()->operator()(values.discrete());
-          conditionalComponent->print(ss.str(), keyFormatter);
-          std::cout << "error = " << conditionalComponent->error(values)
-                    << "\n";
-        }
-      }
-    } else if (auto gf = std::dynamic_pointer_cast<GaussianFactor>(factor)) {
-      const double errorValue = (factor != nullptr ? gf->error(values) : .0);
-      if (!printCondition(factor.get(), errorValue, i))
-        continue;  // User-provided filter did not pass
-
-      if (factor == nullptr) {
-        std::cout << "nullptr"
-                  << "\n";
-      } else {
-        factor->print(ss.str(), keyFormatter);
-        std::cout << "error = " << errorValue << "\n";
-      }
-    } else if (auto df = std::dynamic_pointer_cast<DiscreteFactor>(factor)) {
-      if (factor == nullptr) {
-        std::cout << "nullptr"
-                  << "\n";
-      } else {
-        factor->print(ss.str(), keyFormatter);
-        std::cout << "error = " << df->error(values.discrete()) << std::endl;
-      }
-
-    } else {
+    if (factor == nullptr) {
+      std::cout << "Factor " << i << ": nullptr\n";
       continue;
     }
+    const double errorValue = factor->error(values);
+    if (!printCondition(factor.get(), errorValue, i))
+      continue;  // User-provided filter did not pass
 
+    // Print the factor
+    std::cout << "Factor " << i << ", error = " << errorValue << "\n";
+    printFactor(factor, values.discrete(), keyFormatter);
     std::cout << "\n";
   }
   std::cout.flush();

From e15c44ec5c711f9efae695c0d03fa3b8ed24bbb8 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 22:56:41 -0700
Subject: [PATCH 012/204] Make prune functional

---
 gtsam/hybrid/HybridGaussianConditional.cpp | 50 ++++++++++++++++++++--
 gtsam/hybrid/HybridGaussianConditional.h   |  4 +-
 2 files changed, 50 insertions(+), 4 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 1db13e95b..3c5130f42 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -342,13 +342,57 @@ HybridGaussianConditional::prunerFunc(const DecisionTreeFactor &discreteProbs) {
 }
 
 /* *******************************************************************************/
-void HybridGaussianConditional::prune(const DecisionTreeFactor &discreteProbs) {
+HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune(
+    const DecisionTreeFactor &discreteProbs) const {
+  auto discreteProbsKeySet = DiscreteKeysAsSet(discreteProbs.discreteKeys());
+  auto hybridGaussianCondKeySet = DiscreteKeysAsSet(this->discreteKeys());
+
   // Functional which loops over all assignments and create a set of
   // GaussianConditionals
-  auto pruner = prunerFunc(discreteProbs);
+  auto pruner = [&](const Assignment<Key> &choices,
+                    const GaussianConditional::shared_ptr &conditional)
+      -> GaussianConditional::shared_ptr {
+    // typecast so we can use this to get probability value
+    const DiscreteValues values(choices);
+
+    // Case where the hybrid gaussian conditional has the same
+    // discrete keys as the decision tree.
+    if (hybridGaussianCondKeySet == discreteProbsKeySet) {
+      if (discreteProbs(values) == 0.0) {
+        // empty aka null pointer
+        std::shared_ptr<GaussianConditional> null;
+        return null;
+      } else {
+        return conditional;
+      }
+    } else {
+      std::vector<DiscreteKey> set_diff;
+      std::set_difference(
+          discreteProbsKeySet.begin(), discreteProbsKeySet.end(),
+          hybridGaussianCondKeySet.begin(), hybridGaussianCondKeySet.end(),
+          std::back_inserter(set_diff));
+
+      const std::vector<DiscreteValues> assignments =
+          DiscreteValues::CartesianProduct(set_diff);
+      for (const DiscreteValues &assignment : assignments) {
+        DiscreteValues augmented_values(values);
+        augmented_values.insert(assignment);
+
+        // If any one of the sub-branches are non-zero,
+        // we need this conditional.
+        if (discreteProbs(augmented_values) > 0.0) {
+          return conditional;
+        }
+      }
+      // If we are here, it means that all the sub-branches are 0,
+      // so we prune.
+ return nullptr; + } + }; auto pruned_conditionals = conditionals_.apply(pruner); - conditionals_.root_ = pruned_conditionals.root_; + return std::make_shared(discreteKeys(), + pruned_conditionals); } /* *******************************************************************************/ diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 68c63e7bd..ede748b16 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -225,8 +225,10 @@ class GTSAM_EXPORT HybridGaussianConditional * `discreteProbs`. * * @param discreteProbs A pruned set of probabilities for the discrete keys. + * @return Shared pointer to possibly a pruned HybridGaussianConditional */ - void prune(const DecisionTreeFactor &discreteProbs); + HybridGaussianConditional::shared_ptr prune( + const DecisionTreeFactor &discreteProbs) const; /// @} From d2880e991396967851f84be83b8ed56f3af8207f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 29 Sep 2024 23:07:36 -0700 Subject: [PATCH 013/204] Kill obsolete prunerFunc --- gtsam/hybrid/HybridBayesNet.cpp | 112 ++++---------------------------- gtsam/hybrid/HybridBayesNet.h | 9 ++- 2 files changed, 20 insertions(+), 101 deletions(-) diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index 3c77e3f9a..703c657cf 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -37,94 +37,6 @@ bool HybridBayesNet::equals(const This &bn, double tol) const { return Base::equals(bn, tol); } -/* ************************************************************************* */ -/** - * @brief Helper function to get the pruner functional. - * - * @param prunedDiscreteProbs The prob. decision tree of only discrete keys. - * @param conditional Conditional to prune. Used to get full assignment. - * @return std::function &, double)> - */ -std::function &, double)> prunerFunc( - const DecisionTreeFactor &prunedDiscreteProbs, - const HybridConditional &conditional) { - // Get the discrete keys as sets for the decision tree - // and the hybrid Gaussian conditional. - std::set discreteProbsKeySet = - DiscreteKeysAsSet(prunedDiscreteProbs.discreteKeys()); - std::set conditionalKeySet = - DiscreteKeysAsSet(conditional.discreteKeys()); - - auto pruner = [prunedDiscreteProbs, discreteProbsKeySet, conditionalKeySet]( - const Assignment &choices, - double probability) -> double { - // This corresponds to 0 probability - double pruned_prob = 0.0; - - // typecast so we can use this to get probability value - DiscreteValues values(choices); - // Case where the hybrid Gaussian conditional has the same - // discrete keys as the decision tree. - if (conditionalKeySet == discreteProbsKeySet) { - if (prunedDiscreteProbs(values) == 0) { - return pruned_prob; - } else { - return probability; - } - } else { - // Due to branch merging (aka pruning) in DecisionTree, it is possible we - // get a `values` which doesn't have the full set of keys. 
-      std::set<Key> valuesKeys;
-      for (auto kvp : values) {
-        valuesKeys.insert(kvp.first);
-      }
-      std::set<Key> conditionalKeys;
-      for (auto kvp : conditionalKeySet) {
-        conditionalKeys.insert(kvp.first);
-      }
-      // If true, then values is missing some keys
-      if (conditionalKeys != valuesKeys) {
-        // Get the keys present in conditionalKeys but not in valuesKeys
-        std::vector<Key> missing_keys;
-        std::set_difference(conditionalKeys.begin(), conditionalKeys.end(),
-                            valuesKeys.begin(), valuesKeys.end(),
-                            std::back_inserter(missing_keys));
-        // Insert missing keys with a default assignment.
-        for (auto missing_key : missing_keys) {
-          values[missing_key] = 0;
-        }
-      }
-
-      // Now we generate the full assignment by enumerating
-      // over all keys in the prunedDiscreteProbs.
-      // First we find the differing keys
-      std::vector<DiscreteKey> set_diff;
-      std::set_difference(discreteProbsKeySet.begin(),
-                          discreteProbsKeySet.end(), conditionalKeySet.begin(),
-                          conditionalKeySet.end(),
-                          std::back_inserter(set_diff));
-
-      // Now enumerate over all assignments of the differing keys
-      const std::vector<DiscreteValues> assignments =
-          DiscreteValues::CartesianProduct(set_diff);
-      for (const DiscreteValues &assignment : assignments) {
-        DiscreteValues augmented_values(values);
-        augmented_values.insert(assignment);
-
-        // If any one of the sub-branches are non-zero,
-        // we need this probability.
-        if (prunedDiscreteProbs(augmented_values) > 0.0) {
-          return probability;
-        }
-      }
-      // If we are here, it means that all the sub-branches are 0,
-      // so we prune.
-      return pruned_prob;
-    }
-  };
-  return pruner;
-}
-
 /* ************************************************************************* */
 DecisionTreeFactor HybridBayesNet::pruneDiscreteConditionals(
     size_t maxNrLeaves) {
@@ -164,9 +76,10 @@ DecisionTreeFactor HybridBayesNet::pruneDiscreteConditionals(
 }
 
 /* ************************************************************************* */
-HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) {
+HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) const {
+  HybridBayesNet copy(*this);
   DecisionTreeFactor prunedDiscreteProbs =
-      this->pruneDiscreteConditionals(maxNrLeaves);
+      copy.pruneDiscreteConditionals(maxNrLeaves);
 
   /* To prune, we visitWith every leaf in the HybridGaussianConditional.
   * For each leaf, using the assignment we can check the discrete decision tree
   * for 0.0 probability, then just set the leaf to a nullptr.
   */
 
   // Go through all the conditionals in the
   // Bayes Net and prune them as per prunedDiscreteProbs.
-  for (auto &&conditional : *this) {
+  for (auto &&conditional : copy) {
     if (auto gm = conditional->asHybrid()) {
       // Make a copy of the hybrid Gaussian conditional and prune it!
-      auto prunedHybridGaussianConditional =
-          std::make_shared<HybridGaussianConditional>(*gm);
-      prunedHybridGaussianConditional->prune(
-          prunedDiscreteProbs);  // imperative :-(
+      auto prunedHybridGaussianConditional = gm->prune(prunedDiscreteProbs);
 
       // Type-erase and add to the pruned Bayes Net fragment.
       prunedBayesNetFragment.push_back(prunedHybridGaussianConditional);
@@ -336,10 +246,14 @@ AlgebraicDecisionTree<Key> HybridBayesNet::logProbability(
           });
     } else if (auto dc = conditional->asDiscrete()) {
       // If discrete, add the discrete logProbability in the right branch
-      result = result.apply(
-          [dc](const Assignment<Key> &assignment, double leaf_value) {
-            return leaf_value + dc->logProbability(DiscreteValues(assignment));
-          });
+      if (result.nrLeaves() == 1) {
+        result = dc->errorTree().apply([](double error) { return -error; });
+      } else {
+        result = result.apply([dc](const Assignment<Key> &assignment,
+                                   double leaf_value) {
+          return leaf_value + dc->logProbability(DiscreteValues(assignment));
+        });
+      }
     }
   }
 
diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h
index 62688e8b2..9052a7a16 100644
--- a/gtsam/hybrid/HybridBayesNet.h
+++ b/gtsam/hybrid/HybridBayesNet.h
@@ -201,8 +201,13 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet<HybridConditional> {
    */
   HybridValues sample() const;
 
-  /// Prune the Hybrid Bayes Net such that we have at most maxNrLeaves leaves.
-  HybridBayesNet prune(size_t maxNrLeaves);
+  /**
+   * @brief Prune the Bayes Net such that we have at most maxNrLeaves leaves.
+   *
+   * @param maxNrLeaves Continuous values at which to compute the error.
+   * @return A pruned HybridBayesNet
+   */
+  HybridBayesNet prune(size_t maxNrLeaves) const;
 
   /**
    * @brief Compute conditional error for each discrete assignment,

From 28f5ed0a6edca9e00b8b74054f7f34b2a2da0caf Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 23:08:00 -0700
Subject: [PATCH 014/204] Inline lambda

---
 gtsam/hybrid/HybridGaussianConditional.cpp | 66 ++--------------------
 gtsam/hybrid/HybridGaussianConditional.h   | 11 ----
 2 files changed, 4 insertions(+), 73 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 3c5130f42..478f94f18 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -288,59 +288,6 @@ std::set<DiscreteKey> DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) {
   return s;
 }
 
-/* ************************************************************************* */
-std::function<GaussianConditional::shared_ptr(
-    const Assignment<Key> &, const GaussianConditional::shared_ptr &)>
-HybridGaussianConditional::prunerFunc(const DecisionTreeFactor &discreteProbs) {
-  // Get the discrete keys as sets for the decision tree
-  // and the hybrid gaussian conditional.
-  auto discreteProbsKeySet = DiscreteKeysAsSet(discreteProbs.discreteKeys());
-  auto hybridGaussianCondKeySet = DiscreteKeysAsSet(this->discreteKeys());
-
-  auto pruner = [discreteProbs, discreteProbsKeySet, hybridGaussianCondKeySet](
-                    const Assignment<Key> &choices,
-                    const GaussianConditional::shared_ptr &conditional)
-      -> GaussianConditional::shared_ptr {
-    // typecast so we can use this to get probability value
-    const DiscreteValues values(choices);
-
-    // Case where the hybrid gaussian conditional has the same
-    // discrete keys as the decision tree.
-    if (hybridGaussianCondKeySet == discreteProbsKeySet) {
-      if (discreteProbs(values) == 0.0) {
-        // empty aka null pointer
-        std::shared_ptr<GaussianConditional> null;
-        return null;
-      } else {
-        return conditional;
-      }
-    } else {
-      std::vector<DiscreteKey> set_diff;
-      std::set_difference(
-          discreteProbsKeySet.begin(), discreteProbsKeySet.end(),
-          hybridGaussianCondKeySet.begin(), hybridGaussianCondKeySet.end(),
-          std::back_inserter(set_diff));
-
-      const std::vector<DiscreteValues> assignments =
-          DiscreteValues::CartesianProduct(set_diff);
-      for (const DiscreteValues &assignment : assignments) {
-        DiscreteValues augmented_values(values);
-        augmented_values.insert(assignment);
-
-        // If any one of the sub-branches are non-zero,
-        // we need this conditional.
-        if (discreteProbs(augmented_values) > 0.0) {
-          return conditional;
-        }
-      }
-      // If we are here, it means that all the sub-branches are 0,
-      // so we prune.
-      return nullptr;
-    }
-  };
-  return pruner;
-}
-
 /* *******************************************************************************/
 HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune(
     const DecisionTreeFactor &discreteProbs) const {
   auto discreteProbsKeySet = DiscreteKeysAsSet(discreteProbs.discreteKeys());
   auto hybridGaussianCondKeySet = DiscreteKeysAsSet(this->discreteKeys());
 
   // Functional which loops over all assignments and create a set of
   // GaussianConditionals
   auto pruner = [&](const Assignment<Key> &choices,
                     const GaussianConditional::shared_ptr &conditional)
       -> GaussianConditional::shared_ptr {
     // typecast so we can use this to get probability value
     const DiscreteValues values(choices);
 
     // Case where the hybrid gaussian conditional has the same
     // discrete keys as the decision tree.
     if (hybridGaussianCondKeySet == discreteProbsKeySet) {
-      if (discreteProbs(values) == 0.0) {
-        // empty aka null pointer
-        std::shared_ptr<GaussianConditional> null;
-        return null;
-      } else {
-        return conditional;
-      }
+      return (discreteProbs(values) == 0.0) ? nullptr : conditional;
     } else {
+      // TODO(Frank): It might be faster to "choose" based on values
+      // and then check whether the resulting tree has non-nullptrs.
       std::vector<DiscreteKey> set_diff;
       std::set_difference(
           discreteProbsKeySet.begin(), discreteProbsKeySet.end(),
           hybridGaussianCondKeySet.begin(), hybridGaussianCondKeySet.end(),
           std::back_inserter(set_diff));
 
       const std::vector<DiscreteValues> assignments =
           DiscreteValues::CartesianProduct(set_diff);
       for (const DiscreteValues &assignment : assignments) {
         DiscreteValues augmented_values(values);
         augmented_values.insert(assignment);
 
         // If any one of the sub-branches are non-zero,
         // we need this conditional.
         if (discreteProbs(augmented_values) > 0.0) {
           return conditional;
         }
       }
-      // If we are here, it means that all the sub-branches are 0,
-      // so we prune.
+      // If we are here, it means that all the sub-branches are 0, so we prune.
       return nullptr;
     }
   };
diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index ede748b16..8f3aa6778 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -243,17 +243,6 @@ class GTSAM_EXPORT HybridGaussianConditional
   /// Convert to a DecisionTree of Gaussian factor graphs.
   GaussianFactorGraphTree asGaussianFactorGraphTree() const;
 
-  /**
-   * @brief Get the pruner function from discrete probabilities.
-   *
-   * @param discreteProbs The probabilities of only discrete keys.
-   * @return std::function<GaussianConditional::shared_ptr(
-   *     const Assignment<Key> &, const GaussianConditional::shared_ptr &)>
-   */
-  std::function<GaussianConditional::shared_ptr(
-      const Assignment<Key> &, const GaussianConditional::shared_ptr &)>
-  prunerFunc(const DecisionTreeFactor &prunedProbabilities);
-
   /// Check whether `given` has values for all frontal keys.
   bool allFrontalsGiven(const VectorValues &given) const;

From 38ed6096145337853f5ccbd80a76d77fab9b64a4 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 23:15:19 -0700
Subject: [PATCH 015/204] Fix pruning in iSAM

---
 gtsam/hybrid/HybridBayesTree.cpp      | 8 +++++++-
 gtsam/inference/BayesTreeCliqueBase.h | 3 +++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/gtsam/hybrid/HybridBayesTree.cpp b/gtsam/hybrid/HybridBayesTree.cpp
index 9aee6dcf8..0766f452b 100644
--- a/gtsam/hybrid/HybridBayesTree.cpp
+++ b/gtsam/hybrid/HybridBayesTree.cpp
@@ -26,6 +26,10 @@
 #include
 #include
 
+#include <memory>
+
+#include "gtsam/hybrid/HybridConditional.h"
+
 namespace gtsam {
 
 // Instantiate base class
@@ -207,7 +211,9 @@ void HybridBayesTree::prune(const size_t maxNrLeaves) {
 
     if (conditional->isHybrid()) {
       auto hybridGaussianCond = conditional->asHybrid();
 
-      hybridGaussianCond->prune(parentData.prunedDiscreteProbs);
+      // Imperative
+      clique->conditional() = std::make_shared<HybridConditional>(
+          hybridGaussianCond->prune(parentData.prunedDiscreteProbs));
     }
     return parentData;
   }
diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h
index a7b1cf06c..adffa2f14 100644
--- a/gtsam/inference/BayesTreeCliqueBase.h
+++ b/gtsam/inference/BayesTreeCliqueBase.h
@@ -140,6 +140,9 @@ namespace gtsam {
     /** Access the conditional */
     const sharedConditional& conditional() const { return conditional_; }
 
+    /** Write access to the conditional */
+    sharedConditional& conditional() { return conditional_; }
+
     /// Return true if this clique is the root of a Bayes tree.
     inline bool isRoot() const { return parent_.expired(); }

From a898ad3661e14c76d338476bfe46dfa94c7de8ed Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 11:54:54 -0700
Subject: [PATCH 016/204] discretePosterior

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp    | 25 ++++++++++----
 gtsam/hybrid/HybridGaussianFactorGraph.h      | 32 +++++++++++-------
 .../tests/testHybridGaussianFactorGraph.cpp   | 33 +++++++++++--------
 3 files changed, 59 insertions(+), 31 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 8a2a7fd15..8e6e95c17 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -42,7 +43,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include

/* *******************************************************************************/
 static std::shared_ptr<Factor> createHybridGaussianFactor(
   return std::make_shared<HybridGaussianFactor>(discreteSeparator, newFactors);
 }
 
+/* *******************************************************************************/
+/// Get the discrete keys from the HybridGaussianFactorGraph as DiscreteKeys.
+static auto GetDiscreteKeys =
+    [](const HybridGaussianFactorGraph &hfg) -> DiscreteKeys {
+  const std::set<DiscreteKey> discreteKeySet = hfg.discreteKeys();
+  return {discreteKeySet.begin(), discreteKeySet.end()};
+};
+
 /* *******************************************************************************/
 std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>>
 HybridGaussianFactorGraph::eliminate(const Ordering &keys) const {
   // Since we eliminate all continuous variables first,
   // the discrete separator will be *all* the discrete keys.
-  const std::set<DiscreteKey> keysForDiscreteVariables = discreteKeys();
-  DiscreteKeys discreteSeparator(keysForDiscreteVariables.begin(),
-                                 keysForDiscreteVariables.end());
+  DiscreteKeys discreteSeparator = GetDiscreteKeys(*this);
 
   // Collect all the factors to create a set of Gaussian factor graphs in a
   // decision tree indexed by all discrete keys involved.
@@ -525,14 +531,21 @@ double HybridGaussianFactorGraph::probPrime(const HybridValues &values) const {
 }
 
 /* ************************************************************************ */
-AlgebraicDecisionTree<Key> HybridGaussianFactorGraph::probPrime(
+DecisionTreeFactor HybridGaussianFactorGraph::probPrime(
     const VectorValues &continuousValues) const {
   AlgebraicDecisionTree<Key> error_tree = this->errorTree(continuousValues);
   AlgebraicDecisionTree<Key> prob_tree = error_tree.apply([](double error) {
     // NOTE: The 0.5 term is handled by each factor
     return exp(-error);
   });
-  return prob_tree;
+  return {GetDiscreteKeys(*this), prob_tree};
+}
+
+/* ************************************************************************ */
+DiscreteConditional HybridGaussianFactorGraph::discretePosterior(
+    const VectorValues &continuousValues) const {
+  auto p = probPrime(continuousValues);
+  return {p.size(), p};
 }
 
 /* ************************************************************************ */
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h
index 7e3aac663..5d19b4f83 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.h
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.h
@@ -18,6 +18,7 @@
 
 #pragma once
 
+#include
 #include
 #include
 #include
@@ -187,17 +188,6 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
   AlgebraicDecisionTree<Key> errorTree(
       const VectorValues& continuousValues) const;
 
-  /**
-   * @brief Compute unnormalized probability \f$ P(X | M, Z) \f$
-   * for each discrete assignment, and return as a tree.
-   *
-   * @param continuousValues Continuous values at which to compute the
-   * probability.
-   * @return AlgebraicDecisionTree<Key>
-   */
-  AlgebraicDecisionTree<Key> probPrime(
-      const VectorValues& continuousValues) const;
-
   /**
    * @brief Compute the unnormalized posterior probability for a continuous
    * vector values given a specific assignment.
   *
   * @return double
   */
   double probPrime(const HybridValues& values) const;
 
+  /**
+   * @brief Compute unnormalized probability \f$ P(X | M, Z) \f$
+   * for each discrete assignment, and return as a tree.
+   *
+   * @param continuousValues Continuous values at which to compute probability.
+   * @return DecisionTreeFactor
+   */
+  DecisionTreeFactor probPrime(const VectorValues& continuousValues) const;
+
+  /**
+   * @brief Compute posterior P(M|X=x) when all continuous values X are given.
+   * This is very efficient as it is simply probPrime normalized into a
+   * conditional.
+   *
+   * @param continuousValues Continuous values x to condition on.
+   * @return DecisionTreeFactor
+   */
+  DiscreteConditional discretePosterior(
+      const VectorValues& continuousValues) const;
+
   /**
    * @brief Create a decision tree of factor graphs out of this hybrid factor
   * graph.
diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
index 6aef60386..8ba1eb762 100644
--- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
@@ -603,29 +603,34 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrime) {
 
 /* ****************************************************************************/
 // Test hybrid gaussian factor graph error and unnormalized probabilities
 TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) {
+  // Create switching network with three continuous variables and two discrete:
+  // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x0;z0) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1)
   Switching s(3);
 
-  HybridGaussianFactorGraph graph = s.linearizedFactorGraph;
+  const HybridGaussianFactorGraph &graph = s.linearizedFactorGraph;
 
-  HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential();
+  const HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential();
 
-  HybridValues delta = hybridBayesNet->optimize();
-  auto error_tree = graph.errorTree(delta.continuous());
+  const HybridValues delta = hybridBayesNet->optimize();
 
-  std::vector<DiscreteKey> discrete_keys = {{M(0), 2}, {M(1), 2}};
+  // regression test for errorTree
   std::vector<double> leaves = {0.9998558, 0.4902432, 0.5193694, 0.0097568};
-  AlgebraicDecisionTree<Key> expected_error(discrete_keys, leaves);
-
-  // regression
-  EXPECT(assert_equal(expected_error, error_tree, 1e-7));
+  AlgebraicDecisionTree<Key> expectedErrors(s.modes, leaves);
+  const auto error_tree = graph.errorTree(delta.continuous());
+  EXPECT(assert_equal(expectedErrors, error_tree, 1e-7));
 
+  // regression test for probPrime
+  const DecisionTreeFactor expectedFactor(
+      s.modes, std::vector<double>{0.36793249, 0.61247742, 0.59489556, 0.99029064});
   auto probabilities = graph.probPrime(delta.continuous());
-  std::vector<double> prob_leaves = {0.36793249, 0.61247742, 0.59489556,
-                                     0.99029064};
-  AlgebraicDecisionTree<Key> expected_probabilities(discrete_keys, prob_leaves);
-
-  // regression
-  EXPECT(assert_equal(expected_probabilities, probabilities, 1e-7));
+  EXPECT(assert_equal(expectedFactor, probabilities, 1e-7));
+
+  // regression test for discretePosterior
+  const DecisionTreeFactor normalized(
+      s.modes, std::vector<double>{0.14341014, 0.23872714, 0.23187421, 0.38598852});
+  DiscreteConditional expectedPosterior(2, normalized);
+  auto posterior = graph.discretePosterior(delta.continuous());
+  EXPECT(assert_equal(expectedPosterior, posterior, 1e-7));
 }
 
 /* ****************************************************************************/

From 2abb41059253dd177ad54d2611c9c30dc2d833f6 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 16:36:31 -0700
Subject: [PATCH 017/204] Removed ill-named and confusing method logProbability

---
 gtsam/hybrid/HybridGaussianConditional.cpp           | 18 ------------------
 gtsam/hybrid/HybridGaussianConditional.h             | 10 ----------
 .../tests/testHybridGaussianConditional.cpp          | 11 -----------
 3 files changed, 39 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 478f94f18..1c3a69ce7 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -337,24 +337,6 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune(
       pruned_conditionals);
 }
 
-/* *******************************************************************************/
-AlgebraicDecisionTree<Key> HybridGaussianConditional::logProbability(
-    const VectorValues &continuousValues) const {
-  // functor to calculate (double) logProbability value from
-  // GaussianConditional.
-  auto probFunc =
-      [continuousValues](const GaussianConditional::shared_ptr &conditional) {
-        if (conditional) {
-          return conditional->logProbability(continuousValues);
-        } else {
-          // Return arbitrarily small logProbability if conditional is null
-          // Conditional is null if it is pruned out.
-          return -1e20;
-        }
-      };
-  return DecisionTree<Key, double>(conditionals_, probFunc);
-}
-
 /* *******************************************************************************/
 double HybridGaussianConditional::logProbability(
     const HybridValues &values) const {
diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index 8f3aa6778..f3bf4d839 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -194,16 +194,6 @@ class GTSAM_EXPORT HybridGaussianConditional
   /// Getter for the underlying Conditionals DecisionTree
   const Conditionals &conditionals() const;
 
-  /**
-   * @brief Compute logProbability of the HybridGaussianConditional as a tree.
-   *
-   * @param continuousValues The continuous VectorValues.
-   * @return AlgebraicDecisionTree<Key> A decision tree with the same keys
-   * as the conditionals, and leaf values as the logProbability.
-   */
-  AlgebraicDecisionTree<Key> logProbability(
-      const VectorValues &continuousValues) const;
-
   /**
    * @brief Compute the logProbability of this hybrid Gaussian conditional.
   *
diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
index 803d42f03..24eb409a1 100644
--- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
@@ -74,17 +74,6 @@ TEST(HybridGaussianConditional, Invariants) {
 /// Check LogProbability.
 TEST(HybridGaussianConditional, LogProbability) {
   using namespace equal_constants;
-  auto actual = hybrid_conditional.logProbability(vv);
-
-  // Check result.
-  std::vector<DiscreteKey> discrete_keys = {mode};
-  std::vector<double> leaves = {conditionals[0]->logProbability(vv),
-                                conditionals[1]->logProbability(vv)};
-  AlgebraicDecisionTree<Key> expected(discrete_keys, leaves);
-
-  EXPECT(assert_equal(expected, actual, 1e-6));
-
-  // Check for non-tree version.
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
     EXPECT_DOUBLES_EQUAL(conditionals[mode]->logProbability(vv),

From 64513eb6d9a8d46c415d837e363389ecd7295c6d Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Sun, 29 Sep 2024 16:37:02 -0700
Subject: [PATCH 018/204] discretePosterior for graphs

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp        | 23 +++++++----------
 gtsam/hybrid/HybridGaussianFactorGraph.h          | 17 ++++----------
 .../tests/testHybridGaussianFactorGraph.cpp       |  9 +-------
 3 files changed, 14 insertions(+), 35 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 8e6e95c17..0e5a34359 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -505,22 +505,22 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors,
 
 /* ************************************************************************ */
 AlgebraicDecisionTree<Key> HybridGaussianFactorGraph::errorTree(
     const VectorValues &continuousValues) const {
-  AlgebraicDecisionTree<Key> error_tree(0.0);
+  AlgebraicDecisionTree<Key> result(0.0);
 
   // Iterate over each factor.
   for (auto &factor : factors_) {
     if (auto f = std::dynamic_pointer_cast<HybridFactor>(factor)) {
       // Check for HybridFactor, and call errorTree
-      error_tree = error_tree + f->errorTree(continuousValues);
+      result = result + f->errorTree(continuousValues);
     } else if (auto f = std::dynamic_pointer_cast<DiscreteFactor>(factor)) {
       // Skip discrete factors
       continue;
     } else {
       // Everything else is a continuous only factor
       HybridValues hv(continuousValues, DiscreteValues());
-      error_tree = error_tree + AlgebraicDecisionTree<Key>(factor->error(hv));
+      result = result + AlgebraicDecisionTree<Key>(factor->error(hv));
     }
   }
-  return error_tree;
+  return result;
 }
 
 /* ************************************************************************ */
 double HybridGaussianFactorGraph::probPrime(const HybridValues &values) const {
 }
 
 /* ************************************************************************ */
-DecisionTreeFactor HybridGaussianFactorGraph::probPrime(
+AlgebraicDecisionTree<Key> HybridGaussianFactorGraph::discretePosterior(
     const VectorValues &continuousValues) const {
-  AlgebraicDecisionTree<Key> error_tree = this->errorTree(continuousValues);
-  AlgebraicDecisionTree<Key> prob_tree = error_tree.apply([](double error) {
+  AlgebraicDecisionTree<Key> errors = this->errorTree(continuousValues);
+  AlgebraicDecisionTree<Key> p = errors.apply([](double error) {
     // NOTE: The 0.5 term is handled by each factor
     return exp(-error);
   });
-  return {GetDiscreteKeys(*this), prob_tree};
-}
-
-/* ************************************************************************ */
-DiscreteConditional HybridGaussianFactorGraph::discretePosterior(
-    const VectorValues &continuousValues) const {
-  auto p = probPrime(continuousValues);
-  return {p.size(), p};
+  return p / p.sum();
 }
 
 /* ************************************************************************ */
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h
index 5d19b4f83..3ef6218be 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.h
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.h
@@ -196,24 +196,17 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
   */
   double probPrime(const HybridValues& values) const;
 
-  /**
-   * @brief Compute unnormalized probability \f$ P(X | M, Z) \f$
-   * for each discrete assignment, and return as a tree.
-   *
-   * @param continuousValues Continuous values at which to compute probability.
-   * @return DecisionTreeFactor
-   */
-  DecisionTreeFactor probPrime(const VectorValues& continuousValues) const;
-
   /**
    * @brief Compute posterior P(M|X=x) when all continuous values X are given.
-   * This is very efficient as it is simply probPrime normalized into a
-   * conditional.
+   * This is efficient as it is simply probPrime normalized.
+   *
+   * @note Not a DiscreteConditional as the cardinalities of the DiscreteKeys,
+   * which we would need, are hard to recover.
    *
    * @param continuousValues Continuous values x to condition on.
* @return DecisionTreeFactor */ - DiscreteConditional discretePosterior( + AlgebraicDecisionTree discretePosterior( const VectorValues& continuousValues) const; /** diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 8ba1eb762..0c5f52e61 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -619,16 +619,9 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) { const auto error_tree = graph.errorTree(delta.continuous()); EXPECT(assert_equal(expectedErrors, error_tree, 1e-7)); - // regression test for probPrime - const DecisionTreeFactor expectedFactor( - s.modes, std::vector{0.36793249, 0.61247742, 0.59489556, 0.99029064}); - auto probabilities = graph.probPrime(delta.continuous()); - EXPECT(assert_equal(expectedFactor, probabilities, 1e-7)); - // regression test for discretePosterior - const DecisionTreeFactor normalized( + const AlgebraicDecisionTree expectedPosterior( s.modes, std::vector{0.14341014, 0.23872714, 0.23187421, 0.38598852}); - DiscreteConditional expectedPosterior(2, normalized); auto posterior = graph.discretePosterior(delta.continuous()); EXPECT(assert_equal(expectedPosterior, posterior, 1e-7)); } From 788f4b6a199009c8efb683699c0b6ff7c450625c Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 29 Sep 2024 16:37:30 -0700 Subject: [PATCH 019/204] renamed logProbability and added discretePosterior --- gtsam/hybrid/HybridBayesNet.cpp | 38 ++++--- gtsam/hybrid/HybridBayesNet.h | 33 +++--- gtsam/hybrid/tests/testHybridBayesNet.cpp | 132 +++++++--------------- 3 files changed, 85 insertions(+), 118 deletions(-) diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index 703c657cf..5f655c990 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -214,10 +214,14 @@ AlgebraicDecisionTree HybridBayesNet::errorTree( } else if (auto dc = conditional->asDiscrete()) { // If discrete, add the discrete error in the right branch - result = result.apply( - [dc](const Assignment &assignment, double leaf_value) { - return leaf_value + dc->error(DiscreteValues(assignment)); - }); + if (result.nrLeaves() == 1) { + result = dc->errorTree(); + } else { + result = result.apply( + [dc](const Assignment &assignment, double leaf_value) { + return leaf_value + dc->error(DiscreteValues(assignment)); + }); + } } } @@ -225,22 +229,27 @@ AlgebraicDecisionTree HybridBayesNet::errorTree( } /* ************************************************************************* */ -AlgebraicDecisionTree HybridBayesNet::logProbability( +AlgebraicDecisionTree HybridBayesNet::logDiscretePosteriorPrime( const VectorValues &continuousValues) const { AlgebraicDecisionTree result(0.0); + // Get logProbability function for a conditional or arbitrarily small + // logProbability if the conditional was pruned out. + auto probFunc = [continuousValues]( + const GaussianConditional::shared_ptr &conditional) { + return conditional ? conditional->logProbability(continuousValues) : -1e20; + }; + // Iterate over each conditional. for (auto &&conditional : *this) { if (auto gm = conditional->asHybrid()) { // If conditional is hybrid, select based on assignment and compute // logProbability. 
-      result = result + gm->logProbability(continuousValues);
+      result = result + DecisionTree(gm->conditionals(), probFunc);
    } else if (auto gc = conditional->asGaussian()) {
-      // If continuous, get the (double) logProbability and add it to the
-      // result
+      // If continuous, get the logProbability and add it to the result
      double logProbability = gc->logProbability(continuousValues);
-      // Add the computed logProbability to every leaf of the logProbability
-      // tree.
+      // Add the computed logProbability to every leaf of the tree.
      result = result.apply([logProbability](double leaf_value) {
        return leaf_value + logProbability;
      });
@@ -261,10 +270,13 @@ AlgebraicDecisionTree HybridBayesNet::logProbability(
 }
 
 /* ************************************************************************* */
-AlgebraicDecisionTree HybridBayesNet::evaluate(
+AlgebraicDecisionTree HybridBayesNet::discretePosterior(
    const VectorValues &continuousValues) const {
-  AlgebraicDecisionTree tree = this->logProbability(continuousValues);
-  return tree.apply([](double log) { return exp(log); });
+  AlgebraicDecisionTree log_p =
+      this->logDiscretePosteriorPrime(continuousValues);
+  AlgebraicDecisionTree p =
+      log_p.apply([](double log) { return exp(log); });
+  return p / p.sum();
 }
 
 /* ************************************************************************* */
diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h
index 9052a7a16..9e621ea20 100644
--- a/gtsam/hybrid/HybridBayesNet.h
+++ b/gtsam/hybrid/HybridBayesNet.h
@@ -125,12 +125,13 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet {
 
   /**
    * @brief Get the Gaussian Bayes Net which corresponds to a specific discrete
-   * value assignment.
+   * value assignment. Note this corresponds to the Gaussian posterior p(X|M=m)
+   * of the continuous variables given the discrete assignment M=m.
    *
    * @note Any pure discrete factors are ignored.
    *
    * @param assignment The discrete value assignment for the discrete keys.
-   * @return GaussianBayesNet
+   * @return Gaussian posterior as a GaussianBayesNet
    */
  GaussianBayesNet choose(const DiscreteValues &assignment) const;
 
@@ -226,29 +227,33 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet {
  using Base::error;
 
  /**
-   * @brief Compute log probability for each discrete assignment,
-   * and return as a tree.
+   * @brief Compute the log posterior log P'(M|x) of all assignments up to a
+   * constant, returning the result as an algebraic decision tree.
    *
-   * @param continuousValues Continuous values at which
-   * to compute the log probability.
+   * @note The joint P(X,M) is p(X|M) P(M).
+   * Then the posterior on M given X=x is P(M|x) = p(x|M) P(M) / p(x).
+   * Ideally we want log P(M|x) = log p(x|M) + log P(M) - log p(x), but
+   * unfortunately log p(x) is expensive, so we compute the log of the
+   * unnormalized posterior log P'(M|x) = log p(x|M) + log P(M)
+   *
+   * @param continuousValues Continuous values x at which to compute log P'(M|x)
    * @return AlgebraicDecisionTree
    */
-  AlgebraicDecisionTree logProbability(
+  AlgebraicDecisionTree logDiscretePosteriorPrime(
      const VectorValues &continuousValues) const;
 
  using BayesNet::logProbability;  // expose HybridValues version
 
  /**
-   * @brief Compute unnormalized probability q(μ|M),
-   * for each discrete assignment, and return as a tree.
-   * q(μ|M) is the unnormalized probability at the MLE point μ,
-   * conditioned on the discrete variables.
+   * @brief Compute normalized posterior P(M|X=x) and return as a tree.
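+   * Computed by exponentiating logDiscretePosteriorPrime and dividing by the
+   * sum over all discrete assignments, so the leaves sum to one.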
* - * @param continuousValues Continuous values at which to compute the - * probability. + * @note Not a DiscreteConditional as the cardinalities of the DiscreteKeys, + * which we would need, are hard to recover. + * + * @param continuousValues Continuous values x to condition P(M|X=x) on. * @return AlgebraicDecisionTree */ - AlgebraicDecisionTree evaluate( + AlgebraicDecisionTree discretePosterior( const VectorValues &continuousValues) const; /** diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 79979ac83..8988d1e62 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -65,8 +65,7 @@ TEST(HybridBayesNet, Add) { // Test API for a pure discrete Bayes net P(Asia). TEST(HybridBayesNet, EvaluatePureDiscrete) { HybridBayesNet bayesNet; - const auto pAsia = std::make_shared(Asia, "4/6"); - bayesNet.push_back(pAsia); + bayesNet.emplace_shared(Asia, "4/6"); HybridValues zero{{}, {{asiaKey, 0}}}, one{{}, {{asiaKey, 1}}}; // choose @@ -87,92 +86,39 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { EXPECT(assert_equal(one, bayesNet.sample(one, &rng))); EXPECT(assert_equal(zero, bayesNet.sample(zero, &rng))); + // prune + EXPECT(assert_equal(bayesNet, bayesNet.prune(2))); + // EXPECT(assert_equal(bayesNet, bayesNet.prune(1))); Should fail !!! + // EXPECT(assert_equal(bayesNet, bayesNet.prune(0))); Should fail !!! + + // errorTree + AlgebraicDecisionTree actual = bayesNet.errorTree({}); + AlgebraicDecisionTree expected( + {Asia}, std::vector{-log(0.4), -log(0.6)}); + EXPECT(assert_equal(expected, actual)); + // error EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9); - - // logProbability - EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9); - EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9); - - // toFactorGraph - HybridGaussianFactorGraph expectedFG{pAsia}, fg = bayesNet.toFactorGraph({}); - EXPECT(assert_equal(expectedFG, fg)); - - // prune, imperative :-( - EXPECT(assert_equal(bayesNet, bayesNet.prune(2))); - EXPECT_LONGS_EQUAL(1, bayesNet.prune(1).at(0)->size()); } /* ****************************************************************************/ // Test creation of a tiny hybrid Bayes net. 
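// The tiny net is P(z0|x0,m0) P(x0) P(m0) with a single binary mode m0; the
// checks below condition on a measured z0 (and x0 where needed).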
TEST(HybridBayesNet, Tiny) { - auto bayesNet = tiny::createHybridBayesNet(); // P(z|x,mode)P(x)P(mode) - EXPECT_LONGS_EQUAL(3, bayesNet.size()); + auto bn = tiny::createHybridBayesNet(); + EXPECT_LONGS_EQUAL(3, bn.size()); const VectorValues vv{{Z(0), Vector1(5.0)}, {X(0), Vector1(5.0)}}; - HybridValues zero{vv, {{M(0), 0}}}, one{vv, {{M(0), 1}}}; - - // Check Invariants for components - HybridGaussianConditional::shared_ptr hgc = bayesNet.at(0)->asHybrid(); - GaussianConditional::shared_ptr gc0 = hgc->choose(zero.discrete()), - gc1 = hgc->choose(one.discrete()); - GaussianConditional::shared_ptr px = bayesNet.at(1)->asGaussian(); - GaussianConditional::CheckInvariants(*gc0, vv); - GaussianConditional::CheckInvariants(*gc1, vv); - GaussianConditional::CheckInvariants(*px, vv); - HybridGaussianConditional::CheckInvariants(*hgc, zero); - HybridGaussianConditional::CheckInvariants(*hgc, one); - - // choose - GaussianBayesNet expectedChosen; - expectedChosen.push_back(gc0); - expectedChosen.push_back(px); - auto chosen0 = bayesNet.choose(zero.discrete()); - auto chosen1 = bayesNet.choose(one.discrete()); - EXPECT(assert_equal(expectedChosen, chosen0, 1e-9)); - - // logProbability - const double logP0 = chosen0.logProbability(vv) + log(0.4); // 0.4 is prior - const double logP1 = chosen1.logProbability(vv) + log(0.6); // 0.6 is prior - EXPECT_DOUBLES_EQUAL(logP0, bayesNet.logProbability(zero), 1e-9); - EXPECT_DOUBLES_EQUAL(logP1, bayesNet.logProbability(one), 1e-9); - - // evaluate - EXPECT_DOUBLES_EQUAL(exp(logP0), bayesNet.evaluate(zero), 1e-9); - - // optimize - EXPECT(assert_equal(one, bayesNet.optimize())); - EXPECT(assert_equal(chosen0.optimize(), bayesNet.optimize(zero.discrete()))); - - // sample - std::mt19937_64 rng(42); - EXPECT(assert_equal({{M(0), 1}}, bayesNet.sample(&rng).discrete())); - - // error - const double error0 = chosen0.error(vv) + gc0->negLogConstant() - - px->negLogConstant() - log(0.4); - const double error1 = chosen1.error(vv) + gc1->negLogConstant() - - px->negLogConstant() - log(0.6); - EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); - EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); - EXPECT_DOUBLES_EQUAL(error0 + logP0, error1 + logP1, 1e-9); - - // toFactorGraph - auto fg = bayesNet.toFactorGraph({{Z(0), Vector1(5.0)}}); + auto fg = bn.toFactorGraph(vv); EXPECT_LONGS_EQUAL(3, fg.size()); // Check that the ratio of probPrime to evaluate is the same for all modes. std::vector ratio(2); - ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero); - ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one); + for (size_t mode : {0, 1}) { + const HybridValues hv{vv, {{M(0), mode}}}; + ratio[mode] = std::exp(-fg.error(hv)) / bn.evaluate(hv); + } EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); - - // prune, imperative :-( - auto pruned = bayesNet.prune(1); - EXPECT_LONGS_EQUAL(1, pruned.at(0)->asHybrid()->nrComponents()); - EXPECT(!pruned.equals(bayesNet)); - } /* ****************************************************************************/ @@ -318,22 +264,19 @@ TEST(HybridBayesNet, Pruning) { // Optimize HybridValues delta = posterior->optimize(); - auto actualTree = posterior->evaluate(delta.continuous()); - // Regression test on density tree. - std::vector discrete_keys = {{M(0), 2}, {M(1), 2}}; - std::vector leaves = {6.1112424, 20.346113, 17.785849, 19.738098}; - AlgebraicDecisionTree expected(discrete_keys, leaves); - EXPECT(assert_equal(expected, actualTree, 1e-6)); + // Verify discrete posterior at optimal value sums to 1. 
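+  // (This holds by construction, since discretePosterior divides by p.sum();
+  // the check guards the plumbing rather than the math.)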
+ auto discretePosterior = posterior->discretePosterior(delta.continuous()); + EXPECT_DOUBLES_EQUAL(1.0, discretePosterior.sum(), 1e-9); + + // Regression test on discrete posterior at optimal value. + std::vector leaves = {0.095516068, 0.31800092, 0.27798511, 0.3084979}; + AlgebraicDecisionTree expected(s.modes, leaves); + EXPECT(assert_equal(expected, discretePosterior, 1e-6)); // Prune and get probabilities auto prunedBayesNet = posterior->prune(2); - auto prunedTree = prunedBayesNet.evaluate(delta.continuous()); - - // Regression test on pruned logProbability tree - std::vector pruned_leaves = {0.0, 32.713418, 0.0, 31.735823}; - AlgebraicDecisionTree expected_pruned(discrete_keys, pruned_leaves); - EXPECT(assert_equal(expected_pruned, prunedTree, 1e-6)); + auto prunedTree = prunedBayesNet.discretePosterior(delta.continuous()); // Verify logProbability computation and check specific logProbability value const DiscreteValues discrete_values{{M(0), 1}, {M(1), 1}}; @@ -346,14 +289,21 @@ TEST(HybridBayesNet, Pruning) { posterior->at(3)->asDiscrete()->logProbability(hybridValues); logProbability += posterior->at(4)->asDiscrete()->logProbability(hybridValues); - - // Regression - double density = exp(logProbability); - EXPECT_DOUBLES_EQUAL(density, - 1.6078460548731697 * actualTree(discrete_values), 1e-6); - EXPECT_DOUBLES_EQUAL(density, prunedTree(discrete_values), 1e-9); EXPECT_DOUBLES_EQUAL(logProbability, posterior->logProbability(hybridValues), 1e-9); + + // Check agreement with discrete posterior + // double density = exp(logProbability); + // FAILS: EXPECT_DOUBLES_EQUAL(density, discretePosterior(discrete_values), + // 1e-6); + + // Regression test on pruned logProbability tree + std::vector pruned_leaves = {0.0, 0.50758422, 0.0, 0.49241578}; + AlgebraicDecisionTree expected_pruned(s.modes, pruned_leaves); + EXPECT(assert_equal(expected_pruned, prunedTree, 1e-6)); + + // Regression + // FAILS: EXPECT_DOUBLES_EQUAL(density, prunedTree(discrete_values), 1e-9); } /* ****************************************************************************/ From 3d55fe0d378dff654d8520e9d174b93be22948d5 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 29 Sep 2024 22:56:32 -0700 Subject: [PATCH 020/204] Finish tests --- gtsam/hybrid/tests/testHybridBayesNet.cpp | 36 ++++++++++++++++++----- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 8988d1e62..ee47a698a 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -65,13 +65,18 @@ TEST(HybridBayesNet, Add) { // Test API for a pure discrete Bayes net P(Asia). TEST(HybridBayesNet, EvaluatePureDiscrete) { HybridBayesNet bayesNet; - bayesNet.emplace_shared(Asia, "4/6"); + const auto pAsia = std::make_shared(Asia, "4/6"); + bayesNet.push_back(pAsia); HybridValues zero{{}, {{asiaKey, 0}}}, one{{}, {{asiaKey, 1}}}; // choose GaussianBayesNet empty; EXPECT(assert_equal(empty, bayesNet.choose(zero.discrete()), 1e-9)); + // logProbability + EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9); + // evaluate EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(zero), 1e-9); EXPECT_DOUBLES_EQUAL(0.4, bayesNet(zero), 1e-9); @@ -88,18 +93,35 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { // prune EXPECT(assert_equal(bayesNet, bayesNet.prune(2))); - // EXPECT(assert_equal(bayesNet, bayesNet.prune(1))); Should fail !!! 
-  // EXPECT(assert_equal(bayesNet, bayesNet.prune(0))); Should fail !!!
+  EXPECT_LONGS_EQUAL(1, bayesNet.prune(1).at(0)->size());
 
  // errorTree
  AlgebraicDecisionTree actual = bayesNet.errorTree({});
-  AlgebraicDecisionTree expected(
+  AlgebraicDecisionTree expectedErrorTree(
      {Asia}, std::vector{-log(0.4), -log(0.6)});
-  EXPECT(assert_equal(expected, actual));
+  EXPECT(assert_equal(expectedErrorTree, actual));
 
  // error
  EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9);
  EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9);
+
+  // logDiscretePosteriorPrime, TODO: useless as -errorTree?
+  AlgebraicDecisionTree expected({Asia},
+                                 std::vector{log(0.4), log(0.6)});
+  EXPECT(assert_equal(expected, bayesNet.logDiscretePosteriorPrime({})));
+
+  // logProbability
+  EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9);
+
+  // discretePosterior
+  AlgebraicDecisionTree expectedPosterior({Asia},
+                                          std::vector{0.4, 0.6});
+  EXPECT(assert_equal(expectedPosterior, bayesNet.discretePosterior({})));
+
+  // toFactorGraph
+  HybridGaussianFactorGraph expectedFG{pAsia}, fg = bayesNet.toFactorGraph({});
+  EXPECT(assert_equal(expectedFG, fg));
}

/* ****************************************************************************/
From 50809001e161e2886e7acf9c776b79b94b3aa9a5 Mon Sep 17 00:00:00 2001
From: Frank Dellaert 
Date: Mon, 30 Sep 2024 00:20:10 -0700
Subject: [PATCH 021/204] Got rid of HBN::errorTree. Weird semantics and not
 used except in regression tests.

---
 gtsam/hybrid/HybridBayesNet.cpp               | 34 -------
 gtsam/hybrid/HybridBayesNet.h                 | 10 --
 gtsam/hybrid/tests/testHybridBayesNet.cpp     | 96 ++++++++++++-------
 .../hybrid/tests/testHybridGaussianFactor.cpp |  7 --
 .../tests/testHybridNonlinearFactorGraph.cpp  |  7 --
 5 files changed, 64 insertions(+), 90 deletions(-)

diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp
index 5f655c990..9df0012c7 100644
--- a/gtsam/hybrid/HybridBayesNet.cpp
+++ b/gtsam/hybrid/HybridBayesNet.cpp
@@ -194,40 +194,6 @@ HybridValues HybridBayesNet::sample() const {
  return sample(&kRandomNumberGenerator);
}

-/* ************************************************************************* */
-AlgebraicDecisionTree HybridBayesNet::errorTree(
-    const VectorValues &continuousValues) const {
-  AlgebraicDecisionTree result(0.0);
-
-  // Iterate over each conditional.
-  for (auto &&conditional : *this) {
-    if (auto gm = conditional->asHybrid()) {
-      // If conditional is hybrid, compute error for all assignments.
- result = result + gm->errorTree(continuousValues); - - } else if (auto gc = conditional->asGaussian()) { - // If continuous, get the error and add it to the result - double error = gc->error(continuousValues); - // Add the computed error to every leaf of the result tree. - result = result.apply( - [error](double leaf_value) { return leaf_value + error; }); - - } else if (auto dc = conditional->asDiscrete()) { - // If discrete, add the discrete error in the right branch - if (result.nrLeaves() == 1) { - result = dc->errorTree(); - } else { - result = result.apply( - [dc](const Assignment &assignment, double leaf_value) { - return leaf_value + dc->error(DiscreteValues(assignment)); - }); - } - } - } - - return result; -} - /* ************************************************************************* */ AlgebraicDecisionTree HybridBayesNet::logDiscretePosteriorPrime( const VectorValues &continuousValues) const { diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index 9e621ea20..fba6bb6aa 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -210,16 +210,6 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { */ HybridBayesNet prune(size_t maxNrLeaves) const; - /** - * @brief Compute conditional error for each discrete assignment, - * and return as a tree. - * - * @param continuousValues Continuous values at which to compute the error. - * @return AlgebraicDecisionTree - */ - AlgebraicDecisionTree errorTree( - const VectorValues &continuousValues) const; - /** * @brief Error method using HybridValues which returns specific error for * assignment. diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index ee47a698a..9974827e8 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -95,12 +95,6 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { EXPECT(assert_equal(bayesNet, bayesNet.prune(2))); EXPECT_LONGS_EQUAL(1, bayesNet.prune(1).at(0)->size()); - // errorTree - AlgebraicDecisionTree actual = bayesNet.errorTree({}); - AlgebraicDecisionTree expectedErrorTree( - {Asia}, std::vector{-log(0.4), -log(0.6)}); - EXPECT(assert_equal(expectedErrorTree, actual)); - // error EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9); @@ -127,20 +121,73 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { /* ****************************************************************************/ // Test creation of a tiny hybrid Bayes net. TEST(HybridBayesNet, Tiny) { - auto bn = tiny::createHybridBayesNet(); - EXPECT_LONGS_EQUAL(3, bn.size()); + auto bayesNet = tiny::createHybridBayesNet(); // P(z|x,mode)P(x)P(mode) + EXPECT_LONGS_EQUAL(3, bayesNet.size()); const VectorValues vv{{Z(0), Vector1(5.0)}, {X(0), Vector1(5.0)}}; - auto fg = bn.toFactorGraph(vv); - EXPECT_LONGS_EQUAL(3, fg.size()); + HybridValues zero{vv, {{M(0), 0}}}, one{vv, {{M(0), 1}}}; - // Check that the ratio of probPrime to evaluate is the same for all modes. 
-  std::vector ratio(2);
-  for (size_t mode : {0, 1}) {
-    const HybridValues hv{vv, {{M(0), mode}}};
-    ratio[mode] = std::exp(-fg.error(hv)) / bn.evaluate(hv);
-  }
-  EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8);
+  // choose
+  HybridGaussianConditional::shared_ptr hgc = bayesNet.at(0)->asHybrid();
+  GaussianBayesNet chosen;
+  chosen.push_back(hgc->choose(zero.discrete()));
+  chosen.push_back(bayesNet.at(1)->asGaussian());
+  EXPECT(assert_equal(chosen, bayesNet.choose(zero.discrete()), 1e-9));
+
+  // logProbability
+  const double logP0 = chosen.logProbability(vv) + log(0.4);  // 0.4 is prior
+  const double logP1 = bayesNet.choose(one.discrete()).logProbability(vv) + log(0.6);  // 0.6 is prior
+  EXPECT_DOUBLES_EQUAL(logP0, bayesNet.logProbability(zero), 1e-9);
+  EXPECT_DOUBLES_EQUAL(logP1, bayesNet.logProbability(one), 1e-9);
+
+  // evaluate
+  EXPECT_DOUBLES_EQUAL(exp(logP0), bayesNet.evaluate(zero), 1e-9);
+
+  // optimize
+  EXPECT(assert_equal(one, bayesNet.optimize()));
+  EXPECT(assert_equal(chosen.optimize(), bayesNet.optimize(zero.discrete())));
+
+  // sample
+  std::mt19937_64 rng(42);
+  EXPECT(assert_equal({{M(0), 1}}, bayesNet.sample(&rng).discrete()));
+
+  // prune
+  auto pruned = bayesNet.prune(1);
+  EXPECT_LONGS_EQUAL(1, pruned.at(0)->asHybrid()->nrComponents());
+  EXPECT(!pruned.equals(bayesNet));
+
+  // // error
+  // EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9);
+  // EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9);
+
+  // logDiscretePosteriorPrime, TODO: useless as -errorTree?
+  AlgebraicDecisionTree expected(M(0), logP0, logP1);
+  EXPECT(assert_equal(expected, bayesNet.logDiscretePosteriorPrime(vv)));
+
+  // // logProbability
+  // EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9);
+  // EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9);
+
+  // // discretePosterior
+  // AlgebraicDecisionTree expectedPosterior({Asia},
+  //                                         std::vector{0.4,
+  //                                         0.6});
+  // EXPECT(assert_equal(expectedPosterior, bayesNet.discretePosterior({})));
+
+  // // toFactorGraph
+  // HybridGaussianFactorGraph expectedFG{};
+
+  // auto fg = bayesNet.toFactorGraph(vv);
+  // EXPECT_LONGS_EQUAL(3, fg.size());
+  // EXPECT(assert_equal(expectedFG, fg));
+
+  // // Check that the ratio of probPrime to evaluate is the same for all modes.
+  // std::vector ratio(2);
+  // ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero);
+  // ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one);
+  // EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8);
+
+  // TODO: better test: check if discretePosteriors agree !
}

/* ****************************************************************************/
@@ -174,21 +221,6 @@ TEST(HybridBayesNet, evaluateHybrid) {
                       bayesNet.evaluate(values), 1e-9);
}

-/* ****************************************************************************/
-// Test error for a hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia).
-TEST(HybridBayesNet, Error) {
-  using namespace different_sigmas;
-
-  AlgebraicDecisionTree actual = bayesNet.errorTree(values.continuous());
-
-  // Regression.
-  // Manually added all the error values from the 3 conditional types.
- AlgebraicDecisionTree expected( - {Asia}, std::vector{2.33005033585, 5.38619084965}); - - EXPECT(assert_equal(expected, actual)); -} - /* ****************************************************************************/ // Test choosing an assignment of conditionals TEST(HybridBayesNet, Choose) { diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index c2ffe24c8..5ff8c1478 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -357,16 +357,9 @@ TEST(HybridGaussianFactor, DifferentCovariancesFG) { cv.insert(X(0), Vector1(0.0)); cv.insert(X(1), Vector1(0.0)); - // Check that the error values at the MLE point μ. - AlgebraicDecisionTree errorTree = hbn->errorTree(cv); - DiscreteValues dv0{{M(1), 0}}; DiscreteValues dv1{{M(1), 1}}; - // regression - EXPECT_DOUBLES_EQUAL(9.90348755254, errorTree(dv0), 1e-9); - EXPECT_DOUBLES_EQUAL(0.69314718056, errorTree(dv1), 1e-9); - DiscreteConditional expected_m1(m1, "0.5/0.5"); DiscreteConditional actual_m1 = *(hbn->at(2)->asDiscrete()); diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 4735c1657..3a26f4486 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -994,16 +994,9 @@ TEST(HybridNonlinearFactorGraph, DifferentCovariances) { cv.insert(X(0), Vector1(0.0)); cv.insert(X(1), Vector1(0.0)); - // Check that the error values at the MLE point μ. - AlgebraicDecisionTree errorTree = hbn->errorTree(cv); - DiscreteValues dv0{{M(1), 0}}; DiscreteValues dv1{{M(1), 1}}; - // regression - EXPECT_DOUBLES_EQUAL(9.90348755254, errorTree(dv0), 1e-9); - EXPECT_DOUBLES_EQUAL(0.69314718056, errorTree(dv1), 1e-9); - DiscreteConditional expected_m1(m1, "0.5/0.5"); DiscreteConditional actual_m1 = *(hbn->at(2)->asDiscrete()); From 78b47770c0c0b783de6081ea5047fe275ea851df Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 01:11:18 -0700 Subject: [PATCH 022/204] All tests for tiny work --- gtsam/hybrid/HybridGaussianConditional.h | 1 + gtsam/hybrid/tests/testHybridBayesNet.cpp | 77 ++++++++++++----------- 2 files changed, 41 insertions(+), 37 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index f3bf4d839..27e31d767 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -14,6 +14,7 @@ * @brief A hybrid conditional in the Conditional Linear Gaussian scheme * @author Fan Jiang * @author Varun Agrawal + * @author Frank Dellaert * @date Mar 12, 2022 */ diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 9974827e8..ea9ca8285 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -73,10 +73,6 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { GaussianBayesNet empty; EXPECT(assert_equal(empty, bayesNet.choose(zero.discrete()), 1e-9)); - // logProbability - EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9); - EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9); - // evaluate EXPECT_DOUBLES_EQUAL(0.4, bayesNet.evaluate(zero), 1e-9); EXPECT_DOUBLES_EQUAL(0.4, bayesNet(zero), 1e-9); @@ -127,16 +123,28 @@ TEST(HybridBayesNet, Tiny) { const VectorValues vv{{Z(0), Vector1(5.0)}, {X(0), Vector1(5.0)}}; HybridValues zero{vv, {{M(0), 0}}}, one{vv, {{M(0), 1}}}; - 
// choose + // Check Invariants for components HybridGaussianConditional::shared_ptr hgc = bayesNet.at(0)->asHybrid(); - GaussianBayesNet chosen; - chosen.push_back(hgc->choose(zero.discrete())); - chosen.push_back(bayesNet.at(1)->asGaussian()); - EXPECT(assert_equal(chosen, bayesNet.choose(zero.discrete()), 1e-9)); + GaussianConditional::shared_ptr gc0 = hgc->choose(zero.discrete()), + gc1 = hgc->choose(one.discrete()); + GaussianConditional::shared_ptr px = bayesNet.at(1)->asGaussian(); + GaussianConditional::CheckInvariants(*gc0, vv); + GaussianConditional::CheckInvariants(*gc1, vv); + GaussianConditional::CheckInvariants(*px, vv); + HybridGaussianConditional::CheckInvariants(*hgc, zero); + HybridGaussianConditional::CheckInvariants(*hgc, one); + + // choose + GaussianBayesNet expectedChosen; + expectedChosen.push_back(gc0); + expectedChosen.push_back(px); + auto chosen0 = bayesNet.choose(zero.discrete()); + auto chosen1 = bayesNet.choose(one.discrete()); + EXPECT(assert_equal(expectedChosen, chosen0, 1e-9)); // logProbability - const double logP0 = chosen.logProbability(vv) + log(0.4); // 0.4 is prior - const double logP1 = bayesNet.choose(one.discrete()).logProbability(vv) + log(0.6); // 0.6 is prior + const double logP0 = chosen0.logProbability(vv) + log(0.4); // 0.4 is prior + const double logP1 = chosen1.logProbability(vv) + log(0.6); // 0.6 is prior EXPECT_DOUBLES_EQUAL(logP0, bayesNet.logProbability(zero), 1e-9); EXPECT_DOUBLES_EQUAL(logP1, bayesNet.logProbability(one), 1e-9); @@ -145,7 +153,7 @@ TEST(HybridBayesNet, Tiny) { // optimize EXPECT(assert_equal(one, bayesNet.optimize())); - EXPECT(assert_equal(chosen.optimize(), bayesNet.optimize(zero.discrete()))); + EXPECT(assert_equal(chosen0.optimize(), bayesNet.optimize(zero.discrete()))); // sample std::mt19937_64 rng(42); @@ -156,38 +164,33 @@ TEST(HybridBayesNet, Tiny) { EXPECT_LONGS_EQUAL(1, pruned.at(0)->asHybrid()->nrComponents()); EXPECT(!pruned.equals(bayesNet)); - // // error - // EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9); - // EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9); + // error + const double error0 = chosen0.error(vv) + gc0->negLogConstant() - + px->negLogConstant() - log(0.4); + const double error1 = chosen1.error(vv) + gc1->negLogConstant() - + px->negLogConstant() - log(0.6); + EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); + EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); + EXPECT_DOUBLES_EQUAL(error0 + logP0, error1 + logP1, 1e-9); // logDiscretePosteriorPrime, TODO: useless as -errorTree? 
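  // Note: every conditional in a Bayes net is normalized, so leaf-for-leaf
  // this tree is exactly the negation of the corresponding error tree.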
AlgebraicDecisionTree expected(M(0), logP0, logP1); EXPECT(assert_equal(expected, bayesNet.logDiscretePosteriorPrime(vv))); - // // logProbability - // EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9); - // EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9); + // discretePosterior + double q0 = std::exp(logP0), q1 = std::exp(logP1), sum = q0 + q1; + AlgebraicDecisionTree expectedPosterior(M(0), q0 / sum, q1 / sum); + EXPECT(assert_equal(expectedPosterior, bayesNet.discretePosterior(vv))); - // // discretePosterior - // AlgebraicDecisionTree expectedPosterior({Asia}, - // std::vector{0.4, - // 0.6}); - // EXPECT(assert_equal(expectedPosterior, bayesNet.discretePosterior({}))); + // toFactorGraph + auto fg = bayesNet.toFactorGraph({{Z(0), Vector1(5.0)}}); + EXPECT_LONGS_EQUAL(3, fg.size()); - // // toFactorGraph - // HybridGaussianFactorGraph expectedFG{}; - - // auto fg = bayesNet.toFactorGraph(vv); - // EXPECT_LONGS_EQUAL(3, fg.size()); - // EXPECT(assert_equal(expectedFG, fg)); - - // // Check that the ratio of probPrime to evaluate is the same for all modes. - // std::vector ratio(2); - // ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero); - // ratio[0] = std::exp(-fg.error(one)) / bayesNet.evaluate(one); - // EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); - - // TODO: better test: check if discretePosteriors agree ! + // Check that the ratio of probPrime to evaluate is the same for all modes. + std::vector ratio(2); + ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero); + ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one); + EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); } /* ****************************************************************************/ From d054a041ed446f4d28e250befdebb9e4e37cb6e1 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 14:42:33 -0700 Subject: [PATCH 023/204] choose docs --- gtsam/hybrid/HybridBayesNet.h | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.h | 19 +++++++++++++++++-- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index fba6bb6aa..94a0762de 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -128,7 +128,7 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { * value assignment. Note this corresponds to the Gaussian posterior p(X|M=m) * of the continuous variables given the discrete assignment M=m. * - * @note Any pure discrete factors are ignored. + * @note Be careful, as any factors not Gaussian are ignored. * * @param assignment The discrete value assignment for the discrete keys. 
   * @return Gaussian posterior as a GaussianBayesNet
   */
  GaussianBayesNet choose(const DiscreteValues &assignment) const;

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 0e5a34359..0c4e9c489 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -542,7 +542,7 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::discretePosterior(
}

/* ************************************************************************ */
-GaussianFactorGraph HybridGaussianFactorGraph::operator()(
+GaussianFactorGraph HybridGaussianFactorGraph::choose(
    const DiscreteValues &assignment) const {
  GaussianFactorGraph gfg;
  for (auto &&f : *this) {
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h
index 3ef6218be..a5130ca08 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.h
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.h
@@ -230,8 +230,23 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
      eliminate(const Ordering& keys) const;
  /// @}

-  /// Get the GaussianFactorGraph at a given discrete assignment.
-  GaussianFactorGraph operator()(const DiscreteValues& assignment) const;
+  /**
+   * @brief Get the GaussianFactorGraph at a given discrete assignment. Note
+   * this corresponds to the Gaussian posterior p(X|M=m, Z=z) of the continuous
+   * variables X given the discrete assignment M=m and whatever measurements z
+   * were assumed in the creation of the factor graph.
+   *
+   * @note Be careful, as any factors not Gaussian are ignored.
+   *
+   * @param assignment The discrete value assignment for the discrete keys.
+   * @return Gaussian factors as a GaussianFactorGraph
+   */
+  GaussianFactorGraph choose(const DiscreteValues& assignment) const;
+
+  /// Syntactic sugar for choose
+  GaussianFactorGraph operator()(const DiscreteValues& assignment) const {
+    return choose(assignment);
+  }
};

// traits
From 5fb3b377718253fb6151e40dda7def95bfe2bb37 Mon Sep 17 00:00:00 2001
From: Frank Dellaert 
Date: Mon, 30 Sep 2024 15:19:05 -0700
Subject: [PATCH 024/204] Additional arithmetic

---
 gtsam/discrete/AlgebraicDecisionTree.h        | 11 ++++
 .../tests/testAlgebraicDecisionTree.cpp       | 52 ++++++++++++-------
 2 files changed, 43 insertions(+), 20 deletions(-)

diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h
index 17385a975..6001b1983 100644
--- a/gtsam/discrete/AlgebraicDecisionTree.h
+++ b/gtsam/discrete/AlgebraicDecisionTree.h
@@ -70,6 +70,7 @@ namespace gtsam {
        return a / b;
      }
      static inline double id(const double& x) { return x; }
+      static inline double negate(const double& x) { return -x; }
    };

    AlgebraicDecisionTree(double leaf = 1.0) : Base(leaf) {}
@@ -186,6 +187,16 @@ namespace gtsam {
      return this->apply(g, &Ring::add);
    }

+    /** negation */
+    AlgebraicDecisionTree operator-() const {
+      return this->apply(&Ring::negate);
+    }
+
+    /** subtract */
+    AlgebraicDecisionTree operator-(const AlgebraicDecisionTree& g) const {
+      return *this + (-g);
+    }
+
    /** product */
    AlgebraicDecisionTree operator*(const AlgebraicDecisionTree& g) const {
      return this->apply(g, &Ring::mul);
diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp
index ffb1f0b5a..bf728695c 100644
--- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp
+++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp
@@ -10,10 +10,10 @@
 * -------------------------------------------------------------------------- */

/*
- * @file testDecisionTree.cpp
- * @brief Develop DecisionTree
- * @author Frank Dellaert
- * @date Mar 6,
2011 + * @file testAlgebraicDecisionTree.cpp + * @brief Unit tests for Algebraic decision tree + * @author Frank Dellaert + * @date Mar 6, 2011 */ #include @@ -46,23 +46,35 @@ void dot(const T& f, const string& filename) { #endif } -/** I can't get this to work ! - class Mul: std::function { - inline double operator()(const double& a, const double& b) { - return a * b; - } - }; +/* ************************************************************************** */ +// Test arithmetic: +TEST(ADT, arithmetic) { + DiscreteKey A(0, 2), B(1, 2); + ADT zero{0}, one{1}; + ADT a(A, 1, 2); + ADT b(B, 3, 4); - // If second argument of binary op is Leaf - template - typename DecisionTree::Node::Ptr DecisionTree::Choice::apply_fC_op_gL( Cache& cache, const Leaf& gL, Mul op) const { - Ptr h(new Choice(label(), cardinality())); - for(const NodePtr& branch: branches_) - h->push_back(branch->apply_f_op_g(cache, gL, op)); - return Unique(cache, h); - } - */ + // Addition + CHECK(assert_equal(a, zero + a)); + + // Negate and subtraction + CHECK(assert_equal(-a, zero - a)); + CHECK(assert_equal({zero}, a - a)); + CHECK(assert_equal(a + b, b + a)); + CHECK(assert_equal({A, 3, 4}, a + 2)); + CHECK(assert_equal({B, 1, 2}, b - 2)); + + // Multiplication + CHECK(assert_equal(zero, zero * a)); + CHECK(assert_equal(zero, a * zero)); + CHECK(assert_equal(a, one * a)); + CHECK(assert_equal(a, a * one)); + CHECK(assert_equal(a * b, b * a)); + + // division + // CHECK(assert_equal(a, (a * b) / b)); // not true because no pruning + CHECK(assert_equal(b, (a * b) / a)); +} /* ************************************************************************** */ // instrumented operators From 53599969ad20b73bfa13771a04d64a59625c88ca Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 16:20:50 -0700 Subject: [PATCH 025/204] FIX BUG in errorTree --- gtsam/hybrid/HybridConditional.cpp | 68 ++++++++++++------------------ 1 file changed, 28 insertions(+), 40 deletions(-) diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index cac2adcf8..175aec30c 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -64,7 +64,6 @@ void HybridConditional::print(const std::string &s, if (inner_) { inner_->print("", formatter); - } else { if (isContinuous()) std::cout << "Continuous "; if (isDiscrete()) std::cout << "Discrete "; @@ -100,79 +99,68 @@ bool HybridConditional::equals(const HybridFactor &other, double tol) const { if (auto gm = asHybrid()) { auto other = e->asHybrid(); return other != nullptr && gm->equals(*other, tol); - } - if (auto gc = asGaussian()) { + } else if (auto gc = asGaussian()) { auto other = e->asGaussian(); return other != nullptr && gc->equals(*other, tol); - } - if (auto dc = asDiscrete()) { + } else if (auto dc = asDiscrete()) { auto other = e->asDiscrete(); return other != nullptr && dc->equals(*other, tol); - } - - return inner_ ? (e->inner_ ? inner_->equals(*(e->inner_), tol) : false) - : !(e->inner_); + } else + return inner_ ? (e->inner_ ? 
inner_->equals(*(e->inner_), tol) : false) + : !(e->inner_); } /* ************************************************************************ */ double HybridConditional::error(const HybridValues &values) const { if (auto gc = asGaussian()) { return gc->error(values.continuous()); - } - if (auto gm = asHybrid()) { + } else if (auto gm = asHybrid()) { return gm->error(values); - } - if (auto dc = asDiscrete()) { + } else if (auto dc = asDiscrete()) { return dc->error(values.discrete()); - } - throw std::runtime_error( - "HybridConditional::error: conditional type not handled"); + } else + throw std::runtime_error( + "HybridConditional::error: conditional type not handled"); } /* ************************************************************************ */ AlgebraicDecisionTree HybridConditional::errorTree( const VectorValues &values) const { if (auto gc = asGaussian()) { - return AlgebraicDecisionTree(gc->error(values)); - } - if (auto gm = asHybrid()) { + return {gc->error(values)}; // NOTE: a "constant" tree + } else if (auto gm = asHybrid()) { return gm->errorTree(values); - } - if (auto dc = asDiscrete()) { - return AlgebraicDecisionTree(0.0); - } - throw std::runtime_error( - "HybridConditional::error: conditional type not handled"); + } else if (auto dc = asDiscrete()) { + return dc->errorTree(); + } else + throw std::runtime_error( + "HybridConditional::error: conditional type not handled"); } /* ************************************************************************ */ double HybridConditional::logProbability(const HybridValues &values) const { if (auto gc = asGaussian()) { return gc->logProbability(values.continuous()); - } - if (auto gm = asHybrid()) { + } else if (auto gm = asHybrid()) { return gm->logProbability(values); - } - if (auto dc = asDiscrete()) { + } else if (auto dc = asDiscrete()) { return dc->logProbability(values.discrete()); - } - throw std::runtime_error( - "HybridConditional::logProbability: conditional type not handled"); + } else + throw std::runtime_error( + "HybridConditional::logProbability: conditional type not handled"); } /* ************************************************************************ */ double HybridConditional::negLogConstant() const { if (auto gc = asGaussian()) { return gc->negLogConstant(); - } - if (auto gm = asHybrid()) { - return gm->negLogConstant(); // 0.0! - } - if (auto dc = asDiscrete()) { + } else if (auto gm = asHybrid()) { + return gm->negLogConstant(); + } else if (auto dc = asDiscrete()) { return dc->negLogConstant(); // 0.0! - } - throw std::runtime_error( - "HybridConditional::negLogConstant: conditional type not handled"); + } else + throw std::runtime_error( + "HybridConditional::negLogConstant: conditional type not handled"); } /* ************************************************************************ */ From 3b50ba9895b132db2e02ff41220ef29241dcc21f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 16:21:24 -0700 Subject: [PATCH 026/204] FIX BUG: don't skip discrete factors! 
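
A discrete factor phi(m) must contribute -log phi(m) to the error of every
matching assignment; skipping such factors made errorTree (and hence
discretePosterior) ignore discrete priors entirely. A minimal sketch of the
corrected behavior (a standalone illustration, not part of this patch; the
includes and the 0.9/0.1 prior are assumptions):

    #include <gtsam/discrete/DecisionTreeFactor.h>
    #include <gtsam/hybrid/HybridGaussianFactorGraph.h>
    #include <gtsam/linear/VectorValues.h>

    using namespace gtsam;

    int main() {
      const DiscreteKey m(0, 2);  // one binary mode
      HybridGaussianFactorGraph graph;
      graph.emplace_shared<DecisionTreeFactor>(m, "0.9 0.1");

      // errorTree now includes -log phi(m), i.e. {-log 0.9, -log 0.1}, so
      // the normalized posterior recovers {0.9, 0.1} instead of {0.5, 0.5}.
      const AlgebraicDecisionTree<Key> posterior =
          graph.discretePosterior(VectorValues());
      const DiscreteValues mode0{{0, 0}};
      return posterior(mode0) > 0.89 ? 0 : 1;
    }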
--- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 14 +++++++------- .../tests/testHybridGaussianFactorGraph.cpp | 16 ++++++---------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 0c4e9c489..7dfa56e77 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -508,16 +508,16 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( AlgebraicDecisionTree result(0.0); // Iterate over each factor. for (auto &factor : factors_) { - if (auto f = std::dynamic_pointer_cast(factor)) { - // Check for HybridFactor, and call errorTree - result = result + f->errorTree(continuousValues); - } else if (auto f = std::dynamic_pointer_cast(factor)) { - // Skip discrete factors - continue; + if (auto hf = std::dynamic_pointer_cast(factor)) { + // Add errorTree for hybrid factors, includes HybridGaussianConditionals! + result = result + hf->errorTree(continuousValues); + } else if (auto df = std::dynamic_pointer_cast(factor)) { + // If discrete, just add its errorTree as well + result = result + df->errorTree(); } else { // Everything else is a continuous only factor HybridValues hv(continuousValues, DiscreteValues()); - result = result + AlgebraicDecisionTree(factor->error(hv)); + result = result + factor->error(hv); // NOTE: yes, you can add constants } } return result; diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 0c5f52e61..f30085f02 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -614,21 +614,20 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) { const HybridValues delta = hybridBayesNet->optimize(); // regression test for errorTree - std::vector leaves = {0.9998558, 0.4902432, 0.5193694, 0.0097568}; + std::vector leaves = {2.7916153, 1.5888555, 1.7233422, 1.6191947}; AlgebraicDecisionTree expectedErrors(s.modes, leaves); const auto error_tree = graph.errorTree(delta.continuous()); EXPECT(assert_equal(expectedErrors, error_tree, 1e-7)); // regression test for discretePosterior const AlgebraicDecisionTree expectedPosterior( - s.modes, std::vector{0.14341014, 0.23872714, 0.23187421, 0.38598852}); + s.modes, std::vector{0.095516068, 0.31800092, 0.27798511, 0.3084979}); auto posterior = graph.discretePosterior(delta.continuous()); EXPECT(assert_equal(expectedPosterior, posterior, 1e-7)); } /* ****************************************************************************/ -// Test hybrid gaussian factor graph errorTree during -// incremental operation +// Test hybrid gaussian factor graph errorTree during incremental operation TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { Switching s(4); @@ -648,8 +647,7 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { auto error_tree = graph.errorTree(delta.continuous()); std::vector discrete_keys = {{M(0), 2}, {M(1), 2}}; - std::vector leaves = {0.99985581, 0.4902432, 0.51936941, - 0.0097568009}; + std::vector leaves = {2.7916153, 1.5888555, 1.7233422, 1.6191947}; AlgebraicDecisionTree expected_error(discrete_keys, leaves); // regression @@ -666,12 +664,10 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { delta = hybridBayesNet->optimize(); auto error_tree2 = graph.errorTree(delta.continuous()); - discrete_keys = {{M(0), 2}, {M(1), 2}, {M(2), 2}}; + // regression leaves = {0.50985198, 0.0097577296, 0.50009425, 0, 
0.52922138, 0.029127133, 0.50985105, 0.0097567964}; - AlgebraicDecisionTree expected_error2(discrete_keys, leaves); - - // regression + AlgebraicDecisionTree expected_error2(s.modes, leaves); EXPECT(assert_equal(expected_error, error_tree, 1e-7)); } From d77efb0f51824db728b18b0030e53c3ad7edabf7 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 16:22:02 -0700 Subject: [PATCH 027/204] Drastically simplify errorTree --- gtsam/hybrid/HybridBayesNet.cpp | 37 +++------------------ gtsam/hybrid/HybridBayesNet.h | 6 ++-- gtsam/hybrid/tests/testHybridBayesNet.cpp | 39 ++++++++++++++++------- 3 files changed, 35 insertions(+), 47 deletions(-) diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index 9df0012c7..b4441f15a 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -195,41 +195,13 @@ HybridValues HybridBayesNet::sample() const { } /* ************************************************************************* */ -AlgebraicDecisionTree HybridBayesNet::logDiscretePosteriorPrime( +AlgebraicDecisionTree HybridBayesNet::errorTree( const VectorValues &continuousValues) const { AlgebraicDecisionTree result(0.0); - // Get logProbability function for a conditional or arbitrarily small - // logProbability if the conditional was pruned out. - auto probFunc = [continuousValues]( - const GaussianConditional::shared_ptr &conditional) { - return conditional ? conditional->logProbability(continuousValues) : -1e20; - }; - // Iterate over each conditional. for (auto &&conditional : *this) { - if (auto gm = conditional->asHybrid()) { - // If conditional is hybrid, select based on assignment and compute - // logProbability. - result = result + DecisionTree(gm->conditionals(), probFunc); - } else if (auto gc = conditional->asGaussian()) { - // If continuous, get the logProbability and add it to the result - double logProbability = gc->logProbability(continuousValues); - // Add the computed logProbability to every leaf of the tree. 
- result = result.apply([logProbability](double leaf_value) { - return leaf_value + logProbability; - }); - } else if (auto dc = conditional->asDiscrete()) { - // If discrete, add the discrete logProbability in the right branch - if (result.nrLeaves() == 1) { - result = dc->errorTree().apply([](double error) { return -error; }); - } else { - result = result.apply([dc](const Assignment &assignment, - double leaf_value) { - return leaf_value + dc->logProbability(DiscreteValues(assignment)); - }); - } - } + result = result + conditional->errorTree(continuousValues); } return result; @@ -238,10 +210,9 @@ AlgebraicDecisionTree HybridBayesNet::logDiscretePosteriorPrime( /* ************************************************************************* */ AlgebraicDecisionTree HybridBayesNet::discretePosterior( const VectorValues &continuousValues) const { - AlgebraicDecisionTree log_p = - this->logDiscretePosteriorPrime(continuousValues); + AlgebraicDecisionTree errors = this->errorTree(continuousValues); AlgebraicDecisionTree p = - log_p.apply([](double log) { return exp(log); }); + errors.apply([](double error) { return exp(-error); }); return p / p.sum(); } diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index 94a0762de..a997174ec 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -217,8 +217,8 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { using Base::error; /** - * @brief Compute the log posterior log P'(M|x) of all assignments up to a - * constant, returning the result as an algebraic decision tree. + * @brief Compute the negative log posterior log P'(M|x) of all assignments up + * to a constant, returning the result as an algebraic decision tree. * * @note The joint P(X,M) is p(X|M) P(M) * Then the posterior on M given X=x is is P(M|x) = p(x|M) P(M) / p(x). @@ -229,7 +229,7 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { * @param continuousValues Continuous values x at which to compute log P'(M|x) * @return AlgebraicDecisionTree */ - AlgebraicDecisionTree logDiscretePosteriorPrime( + AlgebraicDecisionTree errorTree( const VectorValues &continuousValues) const; using BayesNet::logProbability; // expose HybridValues version diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index ea9ca8285..521bca4a7 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -95,18 +95,16 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { EXPECT_DOUBLES_EQUAL(-log(0.4), bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(-log(0.6), bayesNet.error(one), 1e-9); - // logDiscretePosteriorPrime, TODO: useless as -errorTree? 
- AlgebraicDecisionTree expected({Asia}, - std::vector{log(0.4), log(0.6)}); - EXPECT(assert_equal(expected, bayesNet.logDiscretePosteriorPrime({}))); + // errorTree + AlgebraicDecisionTree expected(asiaKey, -log(0.4), -log(0.6)); + EXPECT(assert_equal(expected, bayesNet.errorTree({}))); // logProbability EXPECT_DOUBLES_EQUAL(log(0.4), bayesNet.logProbability(zero), 1e-9); EXPECT_DOUBLES_EQUAL(log(0.6), bayesNet.logProbability(one), 1e-9); // discretePosterior - AlgebraicDecisionTree expectedPosterior({Asia}, - std::vector{0.4, 0.6}); + AlgebraicDecisionTree expectedPosterior(asiaKey, 0.4, 0.6); EXPECT(assert_equal(expectedPosterior, bayesNet.discretePosterior({}))); // toFactorGraph @@ -169,15 +167,21 @@ TEST(HybridBayesNet, Tiny) { px->negLogConstant() - log(0.4); const double error1 = chosen1.error(vv) + gc1->negLogConstant() - px->negLogConstant() - log(0.6); + // print errors: + std::cout << "error0 = " << error0 << std::endl; + std::cout << "error1 = " << error1 << std::endl; EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); EXPECT_DOUBLES_EQUAL(error0 + logP0, error1 + logP1, 1e-9); - // logDiscretePosteriorPrime, TODO: useless as -errorTree? - AlgebraicDecisionTree expected(M(0), logP0, logP1); - EXPECT(assert_equal(expected, bayesNet.logDiscretePosteriorPrime(vv))); + // errorTree + AlgebraicDecisionTree expected(M(0), error0, error1); + EXPECT(assert_equal(expected, bayesNet.errorTree(vv))); // discretePosterior + // We have: P(z|x,mode)P(x)P(mode). When we condition on z and x, we get + // P(mode|z,x) \propto P(z|x,mode)P(x)P(mode) + // Normalizing this yields posterior P(mode|z,x) = {0.8, 0.2} double q0 = std::exp(logP0), q1 = std::exp(logP1), sum = q0 + q1; AlgebraicDecisionTree expectedPosterior(M(0), q0 / sum, q1 / sum); EXPECT(assert_equal(expectedPosterior, bayesNet.discretePosterior(vv))); @@ -191,6 +195,19 @@ TEST(HybridBayesNet, Tiny) { ratio[0] = std::exp(-fg.error(zero)) / bayesNet.evaluate(zero); ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one); EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); + + // TODO(Frank): Better test: check if discretePosteriors agree ! 
+ // Since ϕ(M, x) \propto P(M,x|z) + // q0 = std::exp(-fg.error(zero)); + // q1 = std::exp(-fg.error(one)); + // sum = q0 + q1; + // AlgebraicDecisionTree fgPosterior(M(0), q0 / sum, q1 / sum); + VectorValues xv{{X(0), Vector1(5.0)}}; + fg.printErrors(zero); + fg.printErrors(one); + GTSAM_PRINT(fg.errorTree(xv)); + auto fgPosterior = fg.discretePosterior(xv); + EXPECT(assert_equal(expectedPosterior, fgPosterior)); } /* ****************************************************************************/ @@ -556,8 +573,8 @@ TEST(HybridBayesNet, ErrorTreeWithConditional) { AlgebraicDecisionTree errorTree = gfg.errorTree(vv); // regression - AlgebraicDecisionTree expected(m1, 59.335390372, 5050.125); - EXPECT(assert_equal(expected, errorTree, 1e-9)); + AlgebraicDecisionTree expected(m1, 60.028538, 5050.8181); + EXPECT(assert_equal(expected, errorTree, 1e-4)); } /* ************************************************************************* */ From 20e5664928297e865dd8610a47a55fe00d207de5 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 16:23:39 -0700 Subject: [PATCH 028/204] Fix switching docs --- gtsam/hybrid/tests/Switching.h | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h index 547facce9..1b176ad65 100644 --- a/gtsam/hybrid/tests/Switching.h +++ b/gtsam/hybrid/tests/Switching.h @@ -46,29 +46,29 @@ using symbol_shorthand::X; * @brief Create a switching system chain. A switching system is a continuous * system which depends on a discrete mode at each time step of the chain. * - * @param n The number of chain elements. + * @param K The number of chain elements. * @param x The functional to help specify the continuous key. * @param m The functional to help specify the discrete key. * @return HybridGaussianFactorGraph::shared_ptr */ inline HybridGaussianFactorGraph::shared_ptr makeSwitchingChain( - size_t n, std::function x = X, std::function m = M) { + size_t K, std::function x = X, std::function m = M) { HybridGaussianFactorGraph hfg; hfg.add(JacobianFactor(x(1), I_3x3, Z_3x1)); // x(1) to x(n+1) - for (size_t t = 1; t < n; t++) { - DiscreteKeys dKeys{{m(t), 2}}; + for (size_t k = 1; k < K; k++) { + DiscreteKeys dKeys{{m(k), 2}}; std::vector components; components.emplace_back( - new JacobianFactor(x(t), I_3x3, x(t + 1), I_3x3, Z_3x1)); + new JacobianFactor(x(k), I_3x3, x(k + 1), I_3x3, Z_3x1)); components.emplace_back( - new JacobianFactor(x(t), I_3x3, x(t + 1), I_3x3, Vector3::Ones())); - hfg.add(HybridGaussianFactor({m(t), 2}, components)); + new JacobianFactor(x(k), I_3x3, x(k + 1), I_3x3, Vector3::Ones())); + hfg.add(HybridGaussianFactor({m(k), 2}, components)); - if (t > 1) { - hfg.add(DecisionTreeFactor({{m(t - 1), 2}, {m(t), 2}}, "0 1 1 3")); + if (k > 1) { + hfg.add(DecisionTreeFactor({{m(k - 1), 2}, {m(k), 2}}, "0 1 1 3")); } } @@ -118,7 +118,7 @@ inline std::pair> makeBinaryOrdering( using MotionModel = BetweenFactor; // Test fixture with switching network. -/// ϕ(X(0)) .. ϕ(X(k),X(k+1)) .. ϕ(X(k);z_k) .. ϕ(M(0)) .. ϕ(M(k),M(k+1)) +/// ϕ(X(0)) .. ϕ(X(k),X(k+1)) .. ϕ(X(k);z_k) .. ϕ(M(0)) .. ϕ(M(K-3),M(K-2)) struct Switching { size_t K; DiscreteKeys modes; @@ -195,7 +195,7 @@ struct Switching { } /** - * @brief Add "mode chain" to HybridNonlinearFactorGraph from M(0) to M(K-1). + * @brief Add "mode chain" to HybridNonlinearFactorGraph from M(0) to M(K-2). * E.g. if K=4, we want M0, M1 and M2. * * @param fg The factor graph to which the mode chain is added. 
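The corrected comments can be sanity-checked against the fixture itself; a
minimal sketch, assuming the constructor defaults in Switching.h:

    #include <cassert>
    #include "Switching.h"

    using namespace gtsam;

    int main() {
      const Switching s(3);         // K = 3: states X(0..2), modes M(0), M(1)
      assert(s.modes.size() == 2);  // K-1 = 2 binary modes
      // Expected: 1 prior + 2 motion models + 2 measurements + 2 mode-chain
      // factors (a prior on M(0) plus one transition):
      assert(s.nonlinearFactorGraph.size() == 7);
      return 0;
    }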
From a709a2d750f3a1100650169e35865195293cb52f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 30 Sep 2024 16:52:41 -0700 Subject: [PATCH 029/204] Remove printing, add one more test --- gtsam/hybrid/tests/testHybridBayesNet.cpp | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 521bca4a7..f24c6fcb6 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -168,8 +168,6 @@ TEST(HybridBayesNet, Tiny) { const double error1 = chosen1.error(vv) + gc1->negLogConstant() - px->negLogConstant() - log(0.6); // print errors: - std::cout << "error0 = " << error0 << std::endl; - std::cout << "error1 = " << error1 << std::endl; EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); EXPECT_DOUBLES_EQUAL(error0 + logP0, error1 + logP1, 1e-9); @@ -196,16 +194,13 @@ TEST(HybridBayesNet, Tiny) { ratio[1] = std::exp(-fg.error(one)) / bayesNet.evaluate(one); EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); - // TODO(Frank): Better test: check if discretePosteriors agree ! - // Since ϕ(M, x) \propto P(M,x|z) - // q0 = std::exp(-fg.error(zero)); - // q1 = std::exp(-fg.error(one)); - // sum = q0 + q1; - // AlgebraicDecisionTree fgPosterior(M(0), q0 / sum, q1 / sum); + // Better and more general test: + // Since ϕ(M, x) \propto P(M,x|z) the discretePosteriors should agree + q0 = std::exp(-fg.error(zero)); + q1 = std::exp(-fg.error(one)); + sum = q0 + q1; + EXPECT(assert_equal(expectedPosterior, {M(0), q0 / sum, q1 / sum})); VectorValues xv{{X(0), Vector1(5.0)}}; - fg.printErrors(zero); - fg.printErrors(one); - GTSAM_PRINT(fg.errorTree(xv)); auto fgPosterior = fg.discretePosterior(xv); EXPECT(assert_equal(expectedPosterior, fgPosterior)); } From bb22831662641ae9d99bc56456a8243a55c6604a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 1 Oct 2024 12:14:48 -0400 Subject: [PATCH 030/204] implement errorTree in DiscreteFactor --- gtsam/discrete/DecisionTreeFactor.cpp | 16 ---------------- gtsam/discrete/DecisionTreeFactor.h | 5 +---- gtsam/discrete/DiscreteFactor.cpp | 16 ++++++++++++++++ gtsam/discrete/DiscreteFactor.h | 11 +++++------ gtsam/discrete/TableFactor.cpp | 5 ----- gtsam/discrete/TableFactor.h | 5 +---- 6 files changed, 23 insertions(+), 35 deletions(-) diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index d1b68f4bf..68c09295c 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -62,22 +62,6 @@ namespace gtsam { return error(values.discrete()); } - /* ************************************************************************ */ - AlgebraicDecisionTree DecisionTreeFactor::errorTree() const { - // Get all possible assignments - DiscreteKeys dkeys = discreteKeys(); - // Reverse to make cartesian product output a more natural ordering. 
- DiscreteKeys rdkeys(dkeys.rbegin(), dkeys.rend()); - const auto assignments = DiscreteValues::CartesianProduct(rdkeys); - - // Construct vector with error values - std::vector errors; - for (const auto& assignment : assignments) { - errors.push_back(error(assignment)); - } - return AlgebraicDecisionTree(dkeys, errors); - } - /* ************************************************************************ */ double DecisionTreeFactor::safe_div(const double& a, const double& b) { // The use for safe_div is when we divide the product factor by the sum diff --git a/gtsam/discrete/DecisionTreeFactor.h b/gtsam/discrete/DecisionTreeFactor.h index 784b11e51..07d2cac14 100644 --- a/gtsam/discrete/DecisionTreeFactor.h +++ b/gtsam/discrete/DecisionTreeFactor.h @@ -141,7 +141,7 @@ namespace gtsam { } /// Calculate error for DiscreteValues `x`, is -log(probability). - double error(const DiscreteValues& values) const; + double error(const DiscreteValues& values) const override; /// multiply two factors DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override { @@ -292,9 +292,6 @@ namespace gtsam { */ double error(const HybridValues& values) const override; - /// Compute error for each assignment and return as a tree - AlgebraicDecisionTree errorTree() const override; - /// @} private: diff --git a/gtsam/discrete/DiscreteFactor.cpp b/gtsam/discrete/DiscreteFactor.cpp index b44d4fce2..2b11046f4 100644 --- a/gtsam/discrete/DiscreteFactor.cpp +++ b/gtsam/discrete/DiscreteFactor.cpp @@ -50,6 +50,22 @@ double DiscreteFactor::error(const HybridValues& c) const { return this->error(c.discrete()); } +/* ************************************************************************ */ +AlgebraicDecisionTree DiscreteFactor::errorTree() const { + // Get all possible assignments + DiscreteKeys dkeys = discreteKeys(); + // Reverse to make cartesian product output a more natural ordering. 
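+  // (Enumerating the full cartesian product below makes this generic default
+  //  implementation exponential in the number of discrete keys.)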
+ DiscreteKeys rdkeys(dkeys.rbegin(), dkeys.rend()); + const auto assignments = DiscreteValues::CartesianProduct(rdkeys); + + // Construct vector with error values + std::vector errors; + for (const auto& assignment : assignments) { + errors.push_back(error(assignment)); + } + return AlgebraicDecisionTree(dkeys, errors); +} + /* ************************************************************************* */ std::vector expNormalize(const std::vector& logProbs) { double maxLogProb = -std::numeric_limits::infinity(); diff --git a/gtsam/discrete/DiscreteFactor.h b/gtsam/discrete/DiscreteFactor.h index 771efbe5b..19af5bd13 100644 --- a/gtsam/discrete/DiscreteFactor.h +++ b/gtsam/discrete/DiscreteFactor.h @@ -96,7 +96,7 @@ class GTSAM_EXPORT DiscreteFactor : public Factor { virtual double operator()(const DiscreteValues&) const = 0; /// Error is just -log(value) - double error(const DiscreteValues& values) const; + virtual double error(const DiscreteValues& values) const; /** * The Factor::error simply extracts the \class DiscreteValues from the @@ -105,7 +105,7 @@ class GTSAM_EXPORT DiscreteFactor : public Factor { double error(const HybridValues& c) const override; /// Compute error for each assignment and return as a tree - virtual AlgebraicDecisionTree errorTree() const = 0; + virtual AlgebraicDecisionTree errorTree() const; /// Multiply in a DecisionTreeFactor and return the result as /// DecisionTreeFactor @@ -158,8 +158,8 @@ class GTSAM_EXPORT DiscreteFactor : public Factor { // DiscreteFactor // traits -template<> struct traits : public Testable {}; - +template <> +struct traits : public Testable {}; /** * @brief Normalize a set of log probabilities. @@ -177,7 +177,6 @@ template<> struct traits : public Testable {}; * of the (unnormalized) log probabilities are either very large or very * small. */ -std::vector expNormalize(const std::vector &logProbs); - +std::vector expNormalize(const std::vector& logProbs); } // namespace gtsam diff --git a/gtsam/discrete/TableFactor.cpp b/gtsam/discrete/TableFactor.cpp index b360617f5..f4e023a4d 100644 --- a/gtsam/discrete/TableFactor.cpp +++ b/gtsam/discrete/TableFactor.cpp @@ -168,11 +168,6 @@ double TableFactor::error(const HybridValues& values) const { return error(values.discrete()); } -/* ************************************************************************ */ -AlgebraicDecisionTree TableFactor::errorTree() const { - return toDecisionTreeFactor().errorTree(); -} - /* ************************************************************************ */ DecisionTreeFactor TableFactor::operator*(const DecisionTreeFactor& f) const { return toDecisionTreeFactor() * f; diff --git a/gtsam/discrete/TableFactor.h b/gtsam/discrete/TableFactor.h index 228b36337..f0ecd66a3 100644 --- a/gtsam/discrete/TableFactor.h +++ b/gtsam/discrete/TableFactor.h @@ -179,7 +179,7 @@ class GTSAM_EXPORT TableFactor : public DiscreteFactor { double operator()(const DiscreteValues& values) const override; /// Calculate error for DiscreteValues `x`, is -log(probability). 
- double error(const DiscreteValues& values) const; + double error(const DiscreteValues& values) const override; /// multiply two TableFactors TableFactor operator*(const TableFactor& f) const { @@ -358,9 +358,6 @@ class GTSAM_EXPORT TableFactor : public DiscreteFactor { */ double error(const HybridValues& values) const override; - /// Compute error for each assignment and return as a tree - AlgebraicDecisionTree errorTree() const override; - /// @} }; From f42a297a40ebe5af59e4eec113bac333cfd08a2c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 1 Oct 2024 12:15:04 -0400 Subject: [PATCH 031/204] fix docstring --- gtsam/hybrid/tests/Switching.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h index 547facce9..3d8174db7 100644 --- a/gtsam/hybrid/tests/Switching.h +++ b/gtsam/hybrid/tests/Switching.h @@ -158,7 +158,7 @@ struct Switching { nonlinearFactorGraph.emplace_shared>( X(0), measurements.at(0), Isotropic::Sigma(1, prior_sigma)); - // Add "motion models" ϕ(X(k),X(k+1)). + // Add "motion models" ϕ(X(k),X(k+1),M(k)). for (size_t k = 0; k < K - 1; k++) { auto motion_models = motionModels(k, between_sigma); nonlinearFactorGraph.emplace_shared(modes[k], From d2f1894bacf584c49e62691769853f6adcff5084 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 1 Oct 2024 14:26:11 -0400 Subject: [PATCH 032/204] implement errorTree for HybridNonlinearFactorGraph --- gtsam/hybrid/HybridNonlinearFactorGraph.cpp | 31 +++++++++++++++++++++ gtsam/hybrid/HybridNonlinearFactorGraph.h | 13 +++++++++ 2 files changed, 44 insertions(+) diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp index 6d19ef156..0e7e9c692 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp @@ -179,4 +179,35 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( return linearFG; } +/* ************************************************************************* */ +AlgebraicDecisionTree HybridNonlinearFactorGraph::errorTree( + const Values& values) const { + AlgebraicDecisionTree result(0.0); + + // Iterate over each factor. + for (auto& factor : factors_) { + if (auto hnf = std::dynamic_pointer_cast(factor)) { + // Compute factor error and add it. + result = result + hnf->errorTree(values); + + } else if (auto nf = std::dynamic_pointer_cast(factor)) { + // If continuous only, get the (double) error + // and add it to every leaf of the result + result = result + nf->error(values); + + } else if (auto df = std::dynamic_pointer_cast(factor)) { + // If discrete, just add its errorTree as well + result = result + df->errorTree(); + + } else { + throw std::runtime_error( + "HybridNonlinearFactorGraph::errorTree(Values) not implemented for " + "factor type " + + demangle(typeid(factor).name()) + "."); + } + } + + return result; +} + } // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h index 5a09f18d4..53920a4aa 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.h +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h @@ -90,6 +90,19 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { /// Expose error(const HybridValues&) method. using Base::error; + /** + * @brief Compute error of (hybrid) nonlinear factors and discrete factors + * over each discrete assignment, and return as a tree. + * + * Error \f$ e = \Vert f(x) - \mu \Vert_{\Sigma} \f$. 
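+   * For discrete factors, the contribution is \f$ -\log P(m) \f$ instead.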
+ * + * @note: Gaussian and hybrid Gaussian factors are not considered! + * + * @param values Manifold values at which to compute the error. + * @return AlgebraicDecisionTree + */ + AlgebraicDecisionTree errorTree(const Values& values) const; + /// @} }; From 5b713032c1afc4772501a8011d5cfffba824700e Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 1 Oct 2024 11:31:16 -0700 Subject: [PATCH 033/204] Add test for prune --- .../tests/testHybridGaussianConditional.cpp | 58 ++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp index 24eb409a1..cd9c182cd 100644 --- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -25,8 +25,12 @@ #include #include +#include #include +#include "gtsam/discrete/DecisionTree.h" +#include "gtsam/discrete/DiscreteKey.h" + // Include for test suite #include @@ -250,8 +254,60 @@ TEST(HybridGaussianConditional, Likelihood2) { } /* ************************************************************************* */ +// Test pruning a HybridGaussianConditional with two discrete keys, based on a +// DecisionTreeFactor with 3 keys: +TEST(HybridGaussianConditional, Prune) { + // Create a two key conditional: + DiscreteKeys modes{{M(1), 2}, {M(2), 2}}; + std::vector gcs; + for (size_t i = 0; i < 4; i++) { + gcs.push_back( + GaussianConditional::sharedMeanAndStddev(Z(0), Vector1(i + 1), i + 1)); + } + auto empty = std::make_shared(); + HybridGaussianConditional::Conditionals conditionals(modes, gcs); + HybridGaussianConditional hgc(modes, conditionals); + + DiscreteKeys keys = modes; + keys.push_back({M(3), 2}); + { + for (size_t i = 0; i < 8; i++) { + std::vector potentials{0, 0, 0, 0, 0, 0, 0, 0}; + potentials[i] = 1; + const DecisionTreeFactor decisionTreeFactor(keys, potentials); + // Prune the HybridGaussianConditional + const auto pruned = hgc.prune(decisionTreeFactor); + // Check that the pruned HybridGaussianConditional has 1 conditional + EXPECT_LONGS_EQUAL(1, pruned->nrComponents()); + } + } + { + const std::vector potentials{0, 0, 0.5, 0, // + 0, 0, 0.5, 0}; + const DecisionTreeFactor decisionTreeFactor(keys, potentials); + + const auto pruned = hgc.prune(decisionTreeFactor); + + // Check that the pruned HybridGaussianConditional has 2 conditionals + EXPECT_LONGS_EQUAL(2, pruned->nrComponents()); + } + { + const std::vector potentials{0.2, 0, 0.3, 0, // + 0, 0, 0.5, 0}; + const DecisionTreeFactor decisionTreeFactor(keys, potentials); + + const auto pruned = hgc.prune(decisionTreeFactor); + + // Check that the pruned HybridGaussianConditional has 3 conditionals + EXPECT_LONGS_EQUAL(3, pruned->nrComponents()); + } +} + +/* ************************************************************************* + */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } -/* ************************************************************************* */ +/* ************************************************************************* + */ From 2463245ec21aa77301a790b879245c0f35a75887 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 1 Oct 2024 14:38:51 -0400 Subject: [PATCH 034/204] add unit test --- .../tests/testHybridNonlinearFactorGraph.cpp | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 4735c1657..8221321ea 100644 --- 
a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -211,6 +211,24 @@ TEST(HybridNonlinearFactorGraph, PushBack) { // EXPECT_LONGS_EQUAL(3, hnfg.size()); } +/* ****************************************************************************/ +// Test hybrid nonlinear factor graph errorTree +TEST(HybridNonlinearFactorGraph, ErrorTree) { + Switching s(3); + + HybridNonlinearFactorGraph graph = s.nonlinearFactorGraph; + + auto error_tree = graph.errorTree(s.linearizationPoint); + + auto dkeys = graph.discreteKeys(); + std::vector discrete_keys(dkeys.begin(), dkeys.end()); + std::vector leaves = {152.79175946923, 151.59861228867, + 151.70397280433, 151.60943791243}; + AlgebraicDecisionTree expected_error(discrete_keys, leaves); + // regression + EXPECT(assert_equal(expected_error, error_tree, 1e-7)); +} + /**************************************************************************** * Test construction of switching-like hybrid factor graph. */ From dc5c4cb99dbaf7361e70b33cfa4417886a00d40f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 1 Oct 2024 15:50:33 -0400 Subject: [PATCH 035/204] compute expected error --- .../tests/testHybridNonlinearFactorGraph.cpp | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 8221321ea..647a8b646 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -217,15 +217,28 @@ TEST(HybridNonlinearFactorGraph, ErrorTree) { Switching s(3); HybridNonlinearFactorGraph graph = s.nonlinearFactorGraph; + Values values = s.linearizationPoint; auto error_tree = graph.errorTree(s.linearizationPoint); auto dkeys = graph.discreteKeys(); - std::vector discrete_keys(dkeys.begin(), dkeys.end()); - std::vector leaves = {152.79175946923, 151.59861228867, - 151.70397280433, 151.60943791243}; + DiscreteKeys discrete_keys(dkeys.begin(), dkeys.end()); + + // Compute the sum of errors for each factor. + auto assignments = DiscreteValues::CartesianProduct(discrete_keys); + std::vector leaves(assignments.size()); + for (auto &&factor : graph) { + for (size_t i = 0; i < assignments.size(); ++i) { + leaves[i] += + factor->error(HybridValues(VectorValues(), assignments[i], values)); + } + } + // Swap i=1 and i=2 to give correct ordering. + double temp = leaves[1]; + leaves[1] = leaves[2]; + leaves[2] = temp; AlgebraicDecisionTree expected_error(discrete_keys, leaves); - // regression + EXPECT(assert_equal(expected_error, error_tree, 1e-7)); } From b70c63ee4cb2be9ec82601ff143af287259d4371 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 1 Oct 2024 13:32:23 -0700 Subject: [PATCH 036/204] Better prune --- gtsam/discrete/DecisionTreeFactor.h | 4 +- gtsam/hybrid/HybridGaussianConditional.cpp | 49 +++++++--------------- 2 files changed, 16 insertions(+), 37 deletions(-) diff --git a/gtsam/discrete/DecisionTreeFactor.h b/gtsam/discrete/DecisionTreeFactor.h index 784b11e51..7ed116016 100644 --- a/gtsam/discrete/DecisionTreeFactor.h +++ b/gtsam/discrete/DecisionTreeFactor.h @@ -131,7 +131,7 @@ namespace gtsam { /// Calculate probability for given values `x`, /// is just look up in AlgebraicDecisionTree. 
-  double evaluate(const DiscreteValues& values) const {
+  double evaluate(const Assignment<Key>& values) const {
     return ADT::operator()(values);
   }
 
@@ -155,7 +155,7 @@ namespace gtsam {
       return apply(f, safe_div);
     }
 
-    /// Convert into a decisiontree
+    /// Convert into a decision tree
     DecisionTreeFactor toDecisionTreeFactor() const override { return *this; }
 
     /// Create new factor by summing all values with the same separator values
diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 1c3a69ce7..2c0fb28a4 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -291,45 +291,24 @@ std::set<DiscreteKey> DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) {
 /* *******************************************************************************/
 HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune(
     const DecisionTreeFactor &discreteProbs) const {
-  auto discreteProbsKeySet = DiscreteKeysAsSet(discreteProbs.discreteKeys());
-  auto hybridGaussianCondKeySet = DiscreteKeysAsSet(this->discreteKeys());
+  // Find keys in discreteProbs.keys() but not in this->keys():
+  std::set<Key> mine(this->keys().begin(), this->keys().end());
+  std::set<Key> theirs(discreteProbs.keys().begin(),
+                       discreteProbs.keys().end());
+  std::vector<Key> diff;
+  std::set_difference(theirs.begin(), theirs.end(), mine.begin(), mine.end(),
+                      std::back_inserter(diff));
 
-  // Functional which loops over all assignments and create a set of
-  // GaussianConditionals
+  // Find maximum probability value for every combination of our keys.
+  Ordering keys(diff);
+  auto max = discreteProbs.max(keys);
+
+  // Check the max value for every combination of our keys.
+  // If the max value is 0.0, we can prune the corresponding conditional.
   auto pruner = [&](const Assignment<Key> &choices,
                     const GaussianConditional::shared_ptr &conditional)
      -> GaussianConditional::shared_ptr {
-    // typecast so we can use this to get probability value
-    const DiscreteValues values(choices);
-
-    // Case where the hybrid gaussian conditional has the same
-    // discrete keys as the decision tree.
-    if (hybridGaussianCondKeySet == discreteProbsKeySet) {
-      return (discreteProbs(values) == 0.0) ? nullptr : conditional;
-    } else {
-      // TODO(Frank): It might be faster to "choose" based on values
-      // and then check whether the resulting tree has non-nullptrs.
-      std::vector<DiscreteKey> set_diff;
-      std::set_difference(
-          discreteProbsKeySet.begin(), discreteProbsKeySet.end(),
-          hybridGaussianCondKeySet.begin(), hybridGaussianCondKeySet.end(),
-          std::back_inserter(set_diff));
-
-      const std::vector<DiscreteValues> assignments =
-          DiscreteValues::CartesianProduct(set_diff);
-      for (const DiscreteValues &assignment : assignments) {
-        DiscreteValues augmented_values(values);
-        augmented_values.insert(assignment);
-
-        // If any one of the sub-branches are non-zero,
-        // we need this conditional.
-        if (discreteProbs(augmented_values) > 0.0) {
-          return conditional;
-        }
-      }
-      // If we are here, it means that all the sub-branches are 0, so we prune.
-      return nullptr;
-    }
+    return (max->evaluate(choices) == 0.0) ? 
nullptr : conditional; }; auto pruned_conditionals = conditionals_.apply(pruner); From cf9d38ef4fa81936e2e9dce175d40c8e3b24e8e0 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 1 Oct 2024 13:32:41 -0700 Subject: [PATCH 037/204] better, functional prune --- gtsam/hybrid/HybridBayesNet.cpp | 96 ++++++++++------------- gtsam/hybrid/HybridBayesNet.h | 40 +++++----- gtsam/hybrid/tests/testHybridBayesNet.cpp | 45 ++++++----- 3 files changed, 83 insertions(+), 98 deletions(-) diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp index b4441f15a..36503d2ea 100644 --- a/gtsam/hybrid/HybridBayesNet.cpp +++ b/gtsam/hybrid/HybridBayesNet.cpp @@ -17,10 +17,13 @@ */ #include +#include #include #include #include +#include + // In Wrappers we have no access to this so have a default ready static std::mt19937_64 kRandomNumberGenerator(42); @@ -38,48 +41,26 @@ bool HybridBayesNet::equals(const This &bn, double tol) const { } /* ************************************************************************* */ -DecisionTreeFactor HybridBayesNet::pruneDiscreteConditionals( - size_t maxNrLeaves) { - // Get the joint distribution of only the discrete keys - // The joint discrete probability. - DiscreteConditional discreteProbs; - - std::vector discrete_factor_idxs; - // Record frontal keys so we can maintain ordering - Ordering discrete_frontals; - - for (size_t i = 0; i < this->size(); i++) { - auto conditional = this->at(i); - if (conditional->isDiscrete()) { - discreteProbs = discreteProbs * (*conditional->asDiscrete()); - - Ordering conditional_keys(conditional->frontals()); - discrete_frontals += conditional_keys; - discrete_factor_idxs.push_back(i); - } - } - - const DecisionTreeFactor prunedDiscreteProbs = - discreteProbs.prune(maxNrLeaves); - - // Eliminate joint probability back into conditionals - DiscreteFactorGraph dfg{prunedDiscreteProbs}; - DiscreteBayesNet::shared_ptr dbn = dfg.eliminateSequential(discrete_frontals); - - // Assign pruned discrete conditionals back at the correct indices. - for (size_t i = 0; i < discrete_factor_idxs.size(); i++) { - size_t idx = discrete_factor_idxs.at(i); - this->at(idx) = std::make_shared(dbn->at(i)); - } - - return prunedDiscreteProbs; -} - -/* ************************************************************************* */ +// The implementation is: build the entire joint into one factor and then prune. +// TODO(Frank): This can be quite expensive *unless* the factors have already +// been pruned before. Another, possibly faster approach is branch and bound +// search to find the K-best leaves and then create a single pruned conditional. HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) const { - HybridBayesNet copy(*this); - DecisionTreeFactor prunedDiscreteProbs = - copy.pruneDiscreteConditionals(maxNrLeaves); + // Collect all the discrete conditionals. Could be small if already pruned. + const DiscreteBayesNet marginal = discreteMarginal(); + + // Multiply into one big conditional. NOTE: possibly quite expensive. + DiscreteConditional joint; + for (auto &&conditional : marginal) { + joint = joint * (*conditional); + } + + // Prune the joint. NOTE: again, possibly quite expensive. + const DecisionTreeFactor pruned = joint.prune(maxNrLeaves); + + // Create a the result starting with the pruned joint. + HybridBayesNet result; + result.emplace_shared(pruned.size(), pruned); /* To prune, we visitWith every leaf in the HybridGaussianConditional. 
* For each leaf, using the assignment we can check the discrete decision tree @@ -88,25 +69,34 @@ HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) const { * We can later check the HybridGaussianConditional for just nullptrs. */ - HybridBayesNet prunedBayesNetFragment; - - // Go through all the conditionals in the - // Bayes Net and prune them as per prunedDiscreteProbs. - for (auto &&conditional : copy) { - if (auto gm = conditional->asHybrid()) { + // Go through all the Gaussian conditionals in the Bayes Net and prune them as + // per pruned Discrete joint. + for (auto &&conditional : *this) { + if (auto hgc = conditional->asHybrid()) { // Make a copy of the hybrid Gaussian conditional and prune it! - auto prunedHybridGaussianConditional = gm->prune(prunedDiscreteProbs); + auto prunedHybridGaussianConditional = hgc->prune(pruned); // Type-erase and add to the pruned Bayes Net fragment. - prunedBayesNetFragment.push_back(prunedHybridGaussianConditional); - - } else { + result.push_back(prunedHybridGaussianConditional); + } else if (auto gc = conditional->asGaussian()) { // Add the non-HybridGaussianConditional conditional - prunedBayesNetFragment.push_back(conditional); + result.push_back(gc); } + // We ignore DiscreteConditional as they are already pruned and added. } - return prunedBayesNetFragment; + return result; +} + +/* ************************************************************************* */ +DiscreteBayesNet HybridBayesNet::discreteMarginal() const { + DiscreteBayesNet result; + for (auto &&conditional : *this) { + if (auto dc = conditional->asDiscrete()) { + result.push_back(dc); + } + } + return result; } /* ************************************************************************* */ diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index a997174ec..bba301be2 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -18,6 +18,7 @@ #pragma once #include +#include #include #include #include @@ -77,16 +78,11 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { } /** - * Add a conditional using a shared_ptr, using implicit conversion to - * a HybridConditional. - * - * This is useful when you create a conditional shared pointer as you need it - * somewhere else. - * + * Move a HybridConditional into a shared pointer and add. + * Example: - * auto shared_ptr_to_a_conditional = - * std::make_shared(...); - * hbn.push_back(shared_ptr_to_a_conditional); + * HybridGaussianConditional conditional(...); + * hbn.push_back(conditional); // loses the original conditional */ void push_back(HybridConditional &&conditional) { factors_.push_back( @@ -124,14 +120,21 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { } /** - * @brief Get the Gaussian Bayes Net which corresponds to a specific discrete - * value assignment. Note this corresponds to the Gaussian posterior p(X|M=m) - * of the continuous variables given the discrete assignment M=m. + * @brief Get the discrete Bayes Net P(M). As the hybrid Bayes net defines + * P(X,M) = P(X|M) P(M), this method returns the marginal distribution on the + * discrete variables. * - * @note Be careful, as any factors not Gaussian are ignored. + * @return discrete marginal as a DiscreteBayesNet. + */ + DiscreteBayesNet discreteMarginal() const; + + /** + * @brief Get the Gaussian Bayes net P(X|M=m) corresponding to a specific + * assignment m for the discrete variables M. As the hybrid Bayes net defines + * P(X,M) = P(X|M) P(M), this method returns the **posterior** p(X|M=m). 
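+   * @note Discrete conditionals are ignored here; their marginal is
+   * available via discreteMarginal() above.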
* * @param assignment The discrete value assignment for the discrete keys. - * @return Gaussian posterior as a GaussianBayesNet + * @return Gaussian posterior P(X|M=m) as a GaussianBayesNet. */ GaussianBayesNet choose(const DiscreteValues &assignment) const; @@ -222,7 +225,7 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { * * @note The joint P(X,M) is p(X|M) P(M) * Then the posterior on M given X=x is is P(M|x) = p(x|M) P(M) / p(x). - * Ideally we want log P(M|x) = log p(x|M) + log P(M) - log P(x), but + * Ideally we want log P(M|x) = log p(x|M) + log P(M) - log p(x), but * unfortunately log p(x) is expensive, so we compute the log of the * unnormalized posterior log P'(M|x) = log p(x|M) + log P(M) * @@ -255,13 +258,6 @@ class GTSAM_EXPORT HybridBayesNet : public BayesNet { /// @} private: - /** - * @brief Prune all the discrete conditionals. - * - * @param maxNrLeaves - */ - DecisionTreeFactor pruneDiscreteConditionals(size_t maxNrLeaves); - #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ friend class boost::serialization::access; diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index f24c6fcb6..1d22b3d73 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -153,13 +153,14 @@ TEST(HybridBayesNet, Tiny) { EXPECT(assert_equal(one, bayesNet.optimize())); EXPECT(assert_equal(chosen0.optimize(), bayesNet.optimize(zero.discrete()))); - // sample - std::mt19937_64 rng(42); - EXPECT(assert_equal({{M(0), 1}}, bayesNet.sample(&rng).discrete())); + // sample. Not deterministic !!! TODO(Frank): figure out why + // std::mt19937_64 rng(42); + // EXPECT(assert_equal({{M(0), 1}}, bayesNet.sample(&rng).discrete())); // prune auto pruned = bayesNet.prune(1); - EXPECT_LONGS_EQUAL(1, pruned.at(0)->asHybrid()->nrComponents()); + CHECK(pruned.at(1)->asHybrid()); + EXPECT_LONGS_EQUAL(1, pruned.at(1)->asHybrid()->nrComponents()); EXPECT(!pruned.equals(bayesNet)); // error @@ -402,49 +403,47 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { s.linearizedFactorGraph.eliminateSequential(); EXPECT_LONGS_EQUAL(7, posterior->size()); - size_t maxNrLeaves = 3; - DiscreteConditional discreteConditionals; - for (auto&& conditional : *posterior) { - if (conditional->isDiscrete()) { - discreteConditionals = - discreteConditionals * (*conditional->asDiscrete()); - } + DiscreteConditional joint; + for (auto&& conditional : posterior->discreteMarginal()) { + joint = joint * (*conditional); } - const DecisionTreeFactor::shared_ptr prunedDecisionTree = - std::make_shared( - discreteConditionals.prune(maxNrLeaves)); + + size_t maxNrLeaves = 3; + auto prunedDecisionTree = joint.prune(maxNrLeaves); #ifdef GTSAM_DT_MERGING EXPECT_LONGS_EQUAL(maxNrLeaves + 2 /*2 zero leaves*/, - prunedDecisionTree->nrLeaves()); + prunedDecisionTree.nrLeaves()); #else - EXPECT_LONGS_EQUAL(8 /*full tree*/, prunedDecisionTree->nrLeaves()); + EXPECT_LONGS_EQUAL(8 /*full tree*/, prunedDecisionTree.nrLeaves()); #endif // regression + // NOTE(Frank): I had to include *three* non-zeroes here now. DecisionTreeFactor::ADT potentials( - s.modes, std::vector{0, 0, 0, 0.505145423, 0, 1, 0, 0.494854577}); - DiscreteConditional expected_discrete_conditionals(1, s.modes, potentials); + s.modes, + std::vector{0, 0, 0, 0.28739288, 0, 0.43106901, 0, 0.2815381}); + DiscreteConditional expectedConditional(3, s.modes, potentials); // Prune! 
auto pruned = posterior->prune(maxNrLeaves); - // Functor to verify values against the expected_discrete_conditionals + // Functor to verify values against the expectedConditional auto checker = [&](const Assignment& assignment, double probability) -> double { // typecast so we can use this to get probability value DiscreteValues choices(assignment); - if (prunedDecisionTree->operator()(choices) == 0) { + if (prunedDecisionTree(choices) == 0) { EXPECT_DOUBLES_EQUAL(0.0, probability, 1e-9); } else { - EXPECT_DOUBLES_EQUAL(expected_discrete_conditionals(choices), probability, - 1e-9); + EXPECT_DOUBLES_EQUAL(expectedConditional(choices), probability, 1e-6); } return 0.0; }; // Get the pruned discrete conditionals as an AlgebraicDecisionTree - auto pruned_discrete_conditionals = pruned.at(4)->asDiscrete(); + CHECK(pruned.at(0)->asDiscrete()); + auto pruned_discrete_conditionals = pruned.at(0)->asDiscrete(); auto discrete_conditional_tree = std::dynamic_pointer_cast( pruned_discrete_conditionals); From acccef8024a86dc58a7f093d20aa67f1b0b6558b Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 1 Oct 2024 13:51:09 -0700 Subject: [PATCH 038/204] Fix smoother --- gtsam/hybrid/HybridSmoother.cpp | 10 +++------- gtsam/hybrid/HybridSmoother.h | 2 +- gtsam/hybrid/tests/testHybridEstimation.cpp | 7 ++++++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/gtsam/hybrid/HybridSmoother.cpp b/gtsam/hybrid/HybridSmoother.cpp index b898c0520..ca3e27252 100644 --- a/gtsam/hybrid/HybridSmoother.cpp +++ b/gtsam/hybrid/HybridSmoother.cpp @@ -72,21 +72,17 @@ void HybridSmoother::update(HybridGaussianFactorGraph graph, addConditionals(graph, hybridBayesNet_, ordering); // Eliminate. - HybridBayesNet::shared_ptr bayesNetFragment = - graph.eliminateSequential(ordering); + HybridBayesNet bayesNetFragment = *graph.eliminateSequential(ordering); /// Prune if (maxNrLeaves) { // `pruneBayesNet` sets the leaves with 0 in discreteFactor to nullptr in // all the conditionals with the same keys in bayesNetFragment. - HybridBayesNet prunedBayesNetFragment = - bayesNetFragment->prune(*maxNrLeaves); - // Set the bayes net fragment to the pruned version - bayesNetFragment = std::make_shared(prunedBayesNetFragment); + bayesNetFragment = bayesNetFragment.prune(*maxNrLeaves); } // Add the partial bayes net to the posterior bayes net. - hybridBayesNet_.add(*bayesNetFragment); + hybridBayesNet_.add(bayesNetFragment); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/HybridSmoother.h b/gtsam/hybrid/HybridSmoother.h index 267746ab6..66edf86d6 100644 --- a/gtsam/hybrid/HybridSmoother.h +++ b/gtsam/hybrid/HybridSmoother.h @@ -39,7 +39,7 @@ class GTSAM_EXPORT HybridSmoother { * discrete factor on all discrete keys, plus all discrete factors in the * original graph. * - * \note If maxComponents is given, we look at the discrete factor resulting + * \note If maxNrLeaves is given, we look at the discrete factor resulting * from this elimination, and prune it and the Gaussian components * corresponding to the pruned choices. 
* diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 58decc695..88d8be0bc 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -109,6 +109,7 @@ TEST(HybridEstimation, IncrementalSmoother) { HybridGaussianFactorGraph linearized; + constexpr size_t maxNrLeaves = 3; for (size_t k = 1; k < K; k++) { // Motion Model graph.push_back(switching.nonlinearFactorGraph.at(k)); @@ -120,8 +121,12 @@ TEST(HybridEstimation, IncrementalSmoother) { linearized = *graph.linearize(initial); Ordering ordering = smoother.getOrdering(linearized); - smoother.update(linearized, 3, ordering); + smoother.update(linearized, maxNrLeaves, ordering); graph.resize(0); + + // Uncomment to print out pruned discrete marginal: + // smoother.hybridBayesNet().at(0)->asDiscrete()->dot("smoother_" + + // std::to_string(k)); } HybridValues delta = smoother.hybridBayesNet().optimize(); From 3797996e8904373079cdde60d0624453883984d5 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sat, 28 Sep 2024 10:48:51 -0700 Subject: [PATCH 039/204] Store the values --- gtsam/hybrid/HybridGaussianFactor.cpp | 68 ++++++++++--------- gtsam/hybrid/HybridGaussianFactor.h | 15 ++-- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 4 +- gtsam/hybrid/HybridNonlinearFactor.cpp | 4 +- .../hybrid/tests/testSerializationHybrid.cpp | 6 +- 5 files changed, 49 insertions(+), 48 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index b04db4977..d3b26d4ef 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -27,14 +27,16 @@ #include #include +#include "gtsam/base/types.h" + namespace gtsam { /* *******************************************************************************/ -HybridGaussianFactor::Factors HybridGaussianFactor::augment( +HybridGaussianFactor::FactorValuePairs HybridGaussianFactor::augment( const FactorValuePairs &factors) { // Find the minimum value so we can "proselytize" to positive values. // Done because we can't have sqrt of negative numbers. - Factors gaussianFactors; + DecisionTree gaussianFactors; AlgebraicDecisionTree valueTree; std::tie(gaussianFactors, valueTree) = unzip(factors); @@ -42,16 +44,16 @@ HybridGaussianFactor::Factors HybridGaussianFactor::augment( double min_value = valueTree.min(); // Finally, update the [A|b] matrices. - auto update = [&min_value](const GaussianFactorValuePair &gfv) { + auto update = [&min_value](const auto &gfv) -> GaussianFactorValuePair { auto [gf, value] = gfv; auto jf = std::dynamic_pointer_cast(gf); - if (!jf) return gf; + if (!jf) return {gf, 0.0}; // should this be zero or infinite? 
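+    // (As written, null or non-Jacobian components pass through unaugmented,
+    //  with their extra scalar reported as zero.)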
double normalized_value = value - min_value; // If the value is 0, do nothing - if (normalized_value == 0.0) return gf; + if (normalized_value == 0.0) return {gf, 0.0}; GaussianFactorGraph gfg; gfg.push_back(jf); @@ -62,18 +64,16 @@ HybridGaussianFactor::Factors HybridGaussianFactor::augment( auto constantFactor = std::make_shared(c); gfg.push_back(constantFactor); - return std::dynamic_pointer_cast( - std::make_shared(gfg)); + return {std::make_shared(gfg), normalized_value}; }; - return Factors(factors, update); + return FactorValuePairs(factors, update); } /* *******************************************************************************/ struct HybridGaussianFactor::ConstructorHelper { KeyVector continuousKeys; // Continuous keys extracted from factors DiscreteKeys discreteKeys; // Discrete keys provided to the constructors - FactorValuePairs pairs; // Used only if factorsTree is empty - Factors factorsTree; + FactorValuePairs pairs; // The decision tree with factors and scalars ConstructorHelper(const DiscreteKey &discreteKey, const std::vector &factors) @@ -85,9 +85,10 @@ struct HybridGaussianFactor::ConstructorHelper { break; } } - - // Build the DecisionTree from the factor vector - factorsTree = Factors(discreteKeys, factors); + // Build the FactorValuePairs DecisionTree + pairs = FactorValuePairs( + DecisionTree(discreteKeys, factors), + [](const auto &f) { return std::pair{f, 0.0}; }); } ConstructorHelper(const DiscreteKey &discreteKey, @@ -109,6 +110,7 @@ struct HybridGaussianFactor::ConstructorHelper { const FactorValuePairs &factorPairs) : discreteKeys(discreteKeys) { // Extract continuous keys from the first non-null factor + // TODO: just stop after first non-null factor factorPairs.visit([&](const GaussianFactorValuePair &pair) { if (pair.first && continuousKeys.empty()) { continuousKeys = pair.first->keys(); @@ -123,14 +125,13 @@ struct HybridGaussianFactor::ConstructorHelper { /* *******************************************************************************/ HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper &helper) : Base(helper.continuousKeys, helper.discreteKeys), - factors_(helper.factorsTree.empty() ? 
augment(helper.pairs) - : helper.factorsTree) {} + factors_(augment(helper.pairs)) {} /* *******************************************************************************/ HybridGaussianFactor::HybridGaussianFactor( const DiscreteKey &discreteKey, - const std::vector &factors) - : HybridGaussianFactor(ConstructorHelper(discreteKey, factors)) {} + const std::vector &factorPairs) + : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {} /* *******************************************************************************/ HybridGaussianFactor::HybridGaussianFactor( @@ -140,8 +141,8 @@ HybridGaussianFactor::HybridGaussianFactor( /* *******************************************************************************/ HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys &discreteKeys, - const FactorValuePairs &factors) - : HybridGaussianFactor(ConstructorHelper(discreteKeys, factors)) {} + const FactorValuePairs &factorPairs) + : HybridGaussianFactor(ConstructorHelper(discreteKeys, factorPairs)) {} /* *******************************************************************************/ bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { @@ -153,10 +154,12 @@ bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { if (factors_.empty() ^ e->factors_.empty()) return false; // Check the base and the factors: - return Base::equals(*e, tol) && - factors_.equals(e->factors_, [tol](const auto &f1, const auto &f2) { - return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); - }); + auto compareFunc = [tol](const auto &pair1, const auto &pair2) { + auto f1 = pair1.first, f2 = pair2.first; + bool match = (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); + return match && gtsam::equal(pair1.second, pair2.second, tol); + }; + return Base::equals(*e, tol) && factors_.equals(e->factors_, compareFunc); } /* *******************************************************************************/ @@ -171,15 +174,16 @@ void HybridGaussianFactor::print(const std::string &s, } else { factors_.print( "", [&](Key k) { return formatter(k); }, - [&](const sharedFactor &gf) -> std::string { + [&](const auto &pair) -> std::string { RedirectCout rd; std::cout << ":\n"; - if (gf) { - gf->print("", formatter); + if (pair.first) { + pair.first->print("", formatter); return rd.str(); } else { return "nullptr"; } + std::cout << "scalar: " << pair.second << "\n"; }); } std::cout << "}" << std::endl; @@ -188,7 +192,7 @@ void HybridGaussianFactor::print(const std::string &s, /* *******************************************************************************/ HybridGaussianFactor::sharedFactor HybridGaussianFactor::operator()( const DiscreteValues &assignment) const { - return factors_(assignment); + return factors_(assignment).first; } /* *******************************************************************************/ @@ -207,7 +211,7 @@ GaussianFactorGraphTree HybridGaussianFactor::add( /* *******************************************************************************/ GaussianFactorGraphTree HybridGaussianFactor::asGaussianFactorGraphTree() const { - auto wrap = [](const sharedFactor &gf) { return GaussianFactorGraph{gf}; }; + auto wrap = [](const auto &pair) { return GaussianFactorGraph{pair.first}; }; return {factors_, wrap}; } @@ -229,8 +233,8 @@ static double PotentiallyPrunedComponentError( AlgebraicDecisionTree HybridGaussianFactor::errorTree( const VectorValues &continuousValues) const { // functor to convert from sharedFactor to double error value. 
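+  // (Only the factor in each pair matters for the error: the scalar was
+  //  already baked into the augmented [A|b] system by augment() above.)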
- auto errorFunc = [&continuousValues](const sharedFactor &gf) { - return PotentiallyPrunedComponentError(gf, continuousValues); + auto errorFunc = [this, &continuousValues](const auto &pair) { + return PotentiallyPrunedComponentError(pair.first, continuousValues); }; DecisionTree error_tree(factors_, errorFunc); return error_tree; @@ -239,8 +243,8 @@ AlgebraicDecisionTree HybridGaussianFactor::errorTree( /* *******************************************************************************/ double HybridGaussianFactor::error(const HybridValues &values) const { // Directly index to get the component, no need to build the whole tree. - const sharedFactor gf = factors_(values.discrete()); - return PotentiallyPrunedComponentError(gf, values.continuous()); + const auto pair = factors_(values.discrete()); + return PotentiallyPrunedComponentError(pair.first, values.continuous()); } } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index e5a575409..15993f582 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -66,12 +66,10 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { /// typedef for Decision Tree of Gaussian factors and arbitrary value. using FactorValuePairs = DecisionTree; - /// typedef for Decision Tree of Gaussian factors. - using Factors = DecisionTree; private: /// Decision tree of Gaussian factors indexed by discrete keys. - Factors factors_; + FactorValuePairs factors_; public: /// @name Constructors @@ -110,10 +108,10 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { * The value ϕ(x,M) for the factor is again ϕ_m(x) + E_m. * * @param discreteKeys Discrete variables and their cardinalities. - * @param factors The decision tree of Gaussian factor/scalar pairs. + * @param factorPairs The decision tree of Gaussian factor/scalar pairs. */ HybridGaussianFactor(const DiscreteKeys &discreteKeys, - const FactorValuePairs &factors); + const FactorValuePairs &factorPairs); /// @} /// @name Testable @@ -158,7 +156,7 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { double error(const HybridValues &values) const override; /// Getter for GaussianFactor decision tree - const Factors &factors() const { return factors_; } + const FactorValuePairs &factors() const { return factors_; } /// Add HybridNonlinearFactor to a Sum, syntactic sugar. friend GaussianFactorGraphTree &operator+=( @@ -184,10 +182,9 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { * value in the `b` vector as an additional row. * * @param factors DecisionTree of GaussianFactors and arbitrary scalars. - * Gaussian factor in factors. - * @return HybridGaussianFactor::Factors + * @return FactorValuePairs */ - static Factors augment(const FactorValuePairs &factors); + static FactorValuePairs augment(const FactorValuePairs &factors); /// Helper struct to assist private constructor below. struct ConstructorHelper; diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 7dfa56e77..957a85038 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -238,8 +238,8 @@ discreteElimination(const HybridGaussianFactorGraph &factors, } else if (auto gmf = dynamic_pointer_cast(f)) { // Case where we have a HybridGaussianFactor with no continuous keys. // In this case, compute discrete probabilities. 
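+    // (Such factors arise once all continuous variables have been
+    //  eliminated, leaving only a constant per discrete assignment.)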
- auto logProbability = - [&](const GaussianFactor::shared_ptr &factor) -> double { + auto logProbability = [&](const auto &pair) -> double { + auto [factor, _] = pair; if (!factor) return 0.0; return factor->error(VectorValues()); }; diff --git a/gtsam/hybrid/HybridNonlinearFactor.cpp b/gtsam/hybrid/HybridNonlinearFactor.cpp index 9378d07fe..56711b313 100644 --- a/gtsam/hybrid/HybridNonlinearFactor.cpp +++ b/gtsam/hybrid/HybridNonlinearFactor.cpp @@ -196,8 +196,8 @@ std::shared_ptr HybridNonlinearFactor::linearize( } }; - DecisionTree> - linearized_factors(factors_, linearizeDT); + HybridGaussianFactor::FactorValuePairs linearized_factors(factors_, + linearizeDT); return std::make_shared(discreteKeys_, linearized_factors); diff --git a/gtsam/hybrid/tests/testSerializationHybrid.cpp b/gtsam/hybrid/tests/testSerializationHybrid.cpp index e09669117..8258d8615 100644 --- a/gtsam/hybrid/tests/testSerializationHybrid.cpp +++ b/gtsam/hybrid/tests/testSerializationHybrid.cpp @@ -52,11 +52,11 @@ BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor, "gtsam_HybridGaussianFactor"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::Factors, +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs, "gtsam_HybridGaussianFactor_Factors"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::Factors::Leaf, +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Leaf, "gtsam_HybridGaussianFactor_Factors_Leaf"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::Factors::Choice, +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Choice, "gtsam_HybridGaussianFactor_Factors_Choice"); BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, From 14d1594bd177de4b85c9feaa717eda9c4fa3dcaa Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 1 Oct 2024 14:26:11 -0400 Subject: [PATCH 040/204] implement errorTree for HybridNonlinearFactorGraph --- gtsam/hybrid/HybridNonlinearFactorGraph.cpp | 31 +++++++++++++++++++++ gtsam/hybrid/HybridNonlinearFactorGraph.h | 13 +++++++++ 2 files changed, 44 insertions(+) diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp index 6d19ef156..0e7e9c692 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp @@ -179,4 +179,35 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( return linearFG; } +/* ************************************************************************* */ +AlgebraicDecisionTree HybridNonlinearFactorGraph::errorTree( + const Values& values) const { + AlgebraicDecisionTree result(0.0); + + // Iterate over each factor. + for (auto& factor : factors_) { + if (auto hnf = std::dynamic_pointer_cast(factor)) { + // Compute factor error and add it. 
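+      // (errorTree(values) yields one error per assignment of this factor's
+      //  discrete keys; operator+ combines the trees leaf-wise.)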
+ result = result + hnf->errorTree(values); + + } else if (auto nf = std::dynamic_pointer_cast(factor)) { + // If continuous only, get the (double) error + // and add it to every leaf of the result + result = result + nf->error(values); + + } else if (auto df = std::dynamic_pointer_cast(factor)) { + // If discrete, just add its errorTree as well + result = result + df->errorTree(); + + } else { + throw std::runtime_error( + "HybridNonlinearFactorGraph::errorTree(Values) not implemented for " + "factor type " + + demangle(typeid(factor).name()) + "."); + } + } + + return result; +} + } // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h index 5a09f18d4..53920a4aa 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.h +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h @@ -90,6 +90,19 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { /// Expose error(const HybridValues&) method. using Base::error; + /** + * @brief Compute error of (hybrid) nonlinear factors and discrete factors + * over each discrete assignment, and return as a tree. + * + * Error \f$ e = \Vert f(x) - \mu \Vert_{\Sigma} \f$. + * + * @note: Gaussian and hybrid Gaussian factors are not considered! + * + * @param values Manifold values at which to compute the error. + * @return AlgebraicDecisionTree + */ + AlgebraicDecisionTree errorTree(const Values& values) const; + /// @} }; From 1bb5b9551b6a8ff57c4039c436b15226e34e11f0 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 1 Oct 2024 21:41:58 -0700 Subject: [PATCH 041/204] discretePosterior in HNFG --- gtsam/hybrid/HybridGaussianFactor.cpp | 2 +- gtsam/hybrid/HybridNonlinearFactorGraph.cpp | 18 +++++++++++++++--- gtsam/hybrid/HybridNonlinearFactorGraph.h | 17 +++++++++++++++-- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index d3b26d4ef..f1e8a8498 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -233,7 +233,7 @@ static double PotentiallyPrunedComponentError( AlgebraicDecisionTree HybridGaussianFactor::errorTree( const VectorValues &continuousValues) const { // functor to convert from sharedFactor to double error value. - auto errorFunc = [this, &continuousValues](const auto &pair) { + auto errorFunc = [&continuousValues](const auto &pair) { return PotentiallyPrunedComponentError(pair.first, continuousValues); }; DecisionTree error_tree(factors_, errorFunc); diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp index 0e7e9c692..56b75d15e 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp @@ -181,19 +181,19 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize( /* ************************************************************************* */ AlgebraicDecisionTree HybridNonlinearFactorGraph::errorTree( - const Values& values) const { + const Values& continuousValues) const { AlgebraicDecisionTree result(0.0); // Iterate over each factor. for (auto& factor : factors_) { if (auto hnf = std::dynamic_pointer_cast(factor)) { // Compute factor error and add it. 
- result = result + hnf->errorTree(values); + result = result + hnf->errorTree(continuousValues); } else if (auto nf = std::dynamic_pointer_cast(factor)) { // If continuous only, get the (double) error // and add it to every leaf of the result - result = result + nf->error(values); + result = result + nf->error(continuousValues); } else if (auto df = std::dynamic_pointer_cast(factor)) { // If discrete, just add its errorTree as well @@ -210,4 +210,16 @@ AlgebraicDecisionTree HybridNonlinearFactorGraph::errorTree( return result; } +/* ************************************************************************ */ +AlgebraicDecisionTree HybridNonlinearFactorGraph::discretePosterior( + const Values& continuousValues) const { + AlgebraicDecisionTree errors = this->errorTree(continuousValues); + AlgebraicDecisionTree p = errors.apply([](double error) { + // NOTE: The 0.5 term is handled by each factor + return exp(-error); + }); + return p / p.sum(); +} + +/* ************************************************************************ */ } // namespace gtsam diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h index 53920a4aa..dd18cfa60 100644 --- a/gtsam/hybrid/HybridNonlinearFactorGraph.h +++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h @@ -98,10 +98,23 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph { * * @note: Gaussian and hybrid Gaussian factors are not considered! * - * @param values Manifold values at which to compute the error. + * @param continuousValues Manifold values at which to compute the error. * @return AlgebraicDecisionTree */ - AlgebraicDecisionTree errorTree(const Values& values) const; + AlgebraicDecisionTree errorTree(const Values& continuousValues) const; + + /** + * @brief Computer posterior P(M|X=x) when all continuous values X are given. + * This is efficient as this simply takes -exp(.) of errorTree and normalizes. + * + * @note Not a DiscreteConditional as the cardinalities of the DiscreteKeys, + * which we would need, are hard to recover. + * + * @param continuousValues Continuous values x to condition on. 
+ * @return DecisionTreeFactor + */ + AlgebraicDecisionTree discretePosterior( + const Values& continuousValues) const; /// @} }; From 8b3dfd85e70099f43d18260c72020116b7495e64 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 2 Oct 2024 08:39:18 -0700 Subject: [PATCH 042/204] New product factor class --- gtsam/hybrid/HybridGaussianFactor.cpp | 10 +- gtsam/hybrid/HybridGaussianFactor.h | 12 +- gtsam/hybrid/HybridGaussianProductFactor.cpp | 89 +++++++++ gtsam/hybrid/HybridGaussianProductFactor.h | 117 +++++++++++ .../tests/testHybridGaussianProductFactor.cpp | 185 ++++++++++++++++++ 5 files changed, 410 insertions(+), 3 deletions(-) create mode 100644 gtsam/hybrid/HybridGaussianProductFactor.cpp create mode 100644 gtsam/hybrid/HybridGaussianProductFactor.h create mode 100644 gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index f1e8a8498..aa88ded30 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -18,17 +18,17 @@ * @date Mar 12, 2022 */ +#include #include #include #include #include #include +#include #include #include #include -#include "gtsam/base/types.h" - namespace gtsam { /* *******************************************************************************/ @@ -215,6 +215,12 @@ GaussianFactorGraphTree HybridGaussianFactor::asGaussianFactorGraphTree() return {factors_, wrap}; } +/* *******************************************************************************/ +HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { + return {{factors_, + [](const auto &pair) { return GaussianFactorGraph{pair.first}; }}}; +} + /* *******************************************************************************/ /// Helper method to compute the error of a component. static double PotentiallyPrunedComponentError( diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index 15993f582..d160798b6 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -164,6 +165,14 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { sum = factor.add(sum); return sum; } + + /** + * @brief Helper function to return factors and functional to create a + * DecisionTree of Gaussian Factor Graphs. + * + * @return HybridGaussianProductFactor + */ + virtual HybridGaussianProductFactor asProductFactor() const; /// @} protected: @@ -175,7 +184,8 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { */ GaussianFactorGraphTree asGaussianFactorGraphTree() const; - private: + +private: /** * @brief Helper function to augment the [A|b] matrices in the factor * components with the additional scalar values. This is done by storing the diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp new file mode 100644 index 000000000..c9b4c07dd --- /dev/null +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -0,0 +1,89 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file HybridGaussianProductFactor.cpp
+ * @date Oct 2, 2024
+ * @author Frank Dellaert
+ * @author Varun Agrawal
+ */
+
+#include <gtsam/base/types.h>
+#include <gtsam/discrete/DecisionTree.h>
+#include <gtsam/hybrid/HybridGaussianFactor.h>
+#include <gtsam/hybrid/HybridGaussianProductFactor.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+
+namespace gtsam {
+
+static GaussianFactorGraph add(const GaussianFactorGraph &graph1,
+                               const GaussianFactorGraph &graph2) {
+  auto result = graph1;
+  result.push_back(graph2);
+  return result;
+}
+
+HybridGaussianProductFactor operator+(const HybridGaussianProductFactor &a,
+                                      const HybridGaussianProductFactor &b) {
+  return a.empty() ? b : HybridGaussianProductFactor(a.apply(b, add));
+}
+
+HybridGaussianProductFactor HybridGaussianProductFactor::operator+(
+    const HybridGaussianFactor &factor) const {
+  return *this + factor.asProductFactor();
+}
+
+HybridGaussianProductFactor HybridGaussianProductFactor::operator+(
+    const GaussianFactor::shared_ptr &factor) const {
+  return *this + HybridGaussianProductFactor(factor);
+}
+
+HybridGaussianProductFactor &HybridGaussianProductFactor::operator+=(
+    const GaussianFactor::shared_ptr &factor) {
+  *this = *this + factor;
+  return *this;
+}
+
+HybridGaussianProductFactor &
+HybridGaussianProductFactor::operator+=(const HybridGaussianFactor &factor) {
+  *this = *this + factor;
+  return *this;
+}
+
+void HybridGaussianProductFactor::print(const std::string &s,
+                                        const KeyFormatter &formatter) const {
+  KeySet keys;
+  auto printer = [&](const Y &graph) {
+    if (keys.size() == 0)
+      keys = graph.keys();
+    return "Graph of size " + std::to_string(graph.size());
+  };
+  Base::print(s, formatter, printer);
+  if (keys.size() > 0) {
+    std::stringstream ss;
+    ss << s << " Keys:";
+    for (auto &&key : keys)
+      ss << " " << formatter(key);
+    std::cout << ss.str() << "." << std::endl;
+  }
+}
+
+HybridGaussianProductFactor HybridGaussianProductFactor::removeEmpty() const {
+  auto emptyGaussian = [](const GaussianFactorGraph &graph) {
+    bool hasNull =
+        std::any_of(graph.begin(), graph.end(),
+                    [](const GaussianFactor::shared_ptr &ptr) { return !ptr; });
+    return hasNull ? GaussianFactorGraph() : graph;
+  };
+  return {Base(*this, emptyGaussian)};
+}
+
+}  // namespace gtsam
diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h
new file mode 100644
index 000000000..f1bd8bc3c
--- /dev/null
+++ b/gtsam/hybrid/HybridGaussianProductFactor.h
@@ -0,0 +1,117 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al.
(see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file HybridGaussianProductFactor.h
+ * @date Oct 2, 2024
+ * @author Frank Dellaert
+ * @author Varun Agrawal
+ */
+
+#pragma once
+
+#include <gtsam/discrete/DecisionTree.h>
+#include <gtsam/inference/Key.h>
+#include <gtsam/linear/GaussianFactorGraph.h>
+
+namespace gtsam {
+
+class HybridGaussianFactor;
+
+/// A DecisionTree of GaussianFactorGraphs, with operators to accumulate products
+class HybridGaussianProductFactor
+    : public DecisionTree<Key, GaussianFactorGraph> {
+ public:
+  using Y = GaussianFactorGraph;
+  using Base = DecisionTree<Key, GaussianFactorGraph>;
+
+  /// @name Constructors
+  /// @{
+
+  /// Default constructor
+  HybridGaussianProductFactor() = default;
+
+  /**
+   * @brief Construct from a single factor
+   * @tparam FACTOR Factor type
+   * @param factor Shared pointer to the factor
+   */
+  template <class FACTOR>
+  HybridGaussianProductFactor(const std::shared_ptr<FACTOR>& factor)
+      : Base(Y{factor}) {}
+
+  /**
+   * @brief Construct from DecisionTree
+   * @param tree Decision tree to construct from
+   */
+  HybridGaussianProductFactor(Base&& tree) : Base(std::move(tree)) {}
+
+  ///@}
+
+  /// @name Operators
+  ///@{
+
+  /// Add GaussianFactor into HybridGaussianProductFactor
+  HybridGaussianProductFactor operator+(const GaussianFactor::shared_ptr& factor) const;
+
+  /// Add HybridGaussianFactor into HybridGaussianProductFactor
+  HybridGaussianProductFactor operator+(const HybridGaussianFactor& factor) const;
+
+  /// Add-assign operator for GaussianFactor
+  HybridGaussianProductFactor& operator+=(const GaussianFactor::shared_ptr& factor);
+
+  /// Add-assign operator for HybridGaussianFactor
+  HybridGaussianProductFactor& operator+=(const HybridGaussianFactor& factor);
+
+  ///@}
+
+  /// @name Testable
+  /// @{
+
+  /**
+   * @brief Print the HybridGaussianProductFactor
+   * @param s Optional string to prepend
+   * @param formatter Optional key formatter
+   */
+  void print(const std::string& s = "", const KeyFormatter& formatter = DefaultKeyFormatter) const;
+
+  /**
+   * @brief Check if this HybridGaussianProductFactor is equal to another
+   * @param other The other HybridGaussianProductFactor to compare with
+   * @param tol Tolerance for floating point comparisons
+   * @return true if equal, false otherwise
+   */
+  bool equals(const HybridGaussianProductFactor& other, double tol = 1e-9) const {
+    return Base::equals(other, [tol](const Y& a, const Y& b) { return a.equals(b, tol); });
+  }
+
+  /// @}
+
+  /// @name Other methods
+  ///@{
+
+  /**
+   * @brief Remove empty GaussianFactorGraphs from the decision tree
+   * @return A new HybridGaussianProductFactor with empty GaussianFactorGraphs removed
+   *
+   * If any GaussianFactorGraph in the decision tree contains a nullptr, convert
+   * that leaf to an empty GaussianFactorGraph. This is needed because the DecisionTree
+   * will otherwise create a GaussianFactorGraph with a single (null) factor,
+   * which doesn't register as null.
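+   *
+   * Sketch of the intended use (names are illustrative, not from this API's
+   * documentation):
+   * @code
+   *   HybridGaussianProductFactor product = ...;  // may contain pruned leaves
+   *   auto cleaned = product.removeEmpty();       // nulled leaves become {}
+   * @endcode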
+   */
+  HybridGaussianProductFactor removeEmpty() const;
+
+  ///@}
+};
+
+// Testable traits
+template <>
+struct traits<HybridGaussianProductFactor>
+    : public Testable<HybridGaussianProductFactor> {};
+
+}  // namespace gtsam
diff --git a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
new file mode 100644
index 000000000..bd830794a
--- /dev/null
+++ b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
@@ -0,0 +1,185 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file testHybridGaussianProductFactor.cpp
+ * @brief Unit tests for HybridGaussianProductFactor
+ * @author Frank Dellaert
+ * @date October 2024
+ */
+
+#include "gtsam/inference/Key.h"
+#include <gtsam/base/Matrix.h>
+#include <gtsam/discrete/DiscreteKey.h>
+#include <gtsam/hybrid/HybridGaussianFactor.h>
+#include <gtsam/hybrid/HybridGaussianProductFactor.h>
+#include <gtsam/inference/Symbol.h>
+#include <gtsam/linear/GaussianConditional.h>
+#include <gtsam/linear/JacobianFactor.h>
+
+// Include for test suite
+#include <CppUnitLite/TestHarness.h>
+
+#include <memory>
+
+using namespace std;
+using namespace gtsam;
+using symbol_shorthand::M;
+using symbol_shorthand::X;
+using symbol_shorthand::Z;
+
+/* ************************************************************************* */
+namespace examples {
+static const DiscreteKey m1(M(1), 2), m2(M(2), 3);
+
+auto A1 = Matrix::Zero(2, 1);
+auto A2 = Matrix::Zero(2, 2);
+auto b = Matrix::Zero(2, 1);
+
+auto f10 = std::make_shared<JacobianFactor>(X(1), A1, X(2), A2, b);
+auto f11 = std::make_shared<JacobianFactor>(X(1), A1, X(2), A2, b);
+
+auto A3 = Matrix::Zero(2, 3);
+auto f20 = std::make_shared<JacobianFactor>(X(1), A1, X(3), A3, b);
+auto f21 = std::make_shared<JacobianFactor>(X(1), A1, X(3), A3, b);
+auto f22 = std::make_shared<JacobianFactor>(X(1), A1, X(3), A3, b);
+
+HybridGaussianFactor hybridFactorA(m1, {f10, f11});
+HybridGaussianFactor hybridFactorB(m2, {f20, f21, f22});
+// Simulate a pruned hybrid factor, in this case m2==1 is nulled out.
+HybridGaussianFactor prunedFactorB(m2, {f20, nullptr, f22});
+}  // namespace examples
+
+/* ************************************************************************* */
+// Constructor
+TEST(HybridGaussianProductFactor, Construct) {
+  HybridGaussianProductFactor product;
+}
+
+/* ************************************************************************* */
+// Add two Gaussian factors and check only one leaf in tree
+TEST(HybridGaussianProductFactor, AddTwoGaussianFactors) {
+  using namespace examples;
+
+  HybridGaussianProductFactor product;
+  product += f10;
+  product += f11;
+
+  // Check that the product has only one leaf and no discrete variables.
+  EXPECT_LONGS_EQUAL(1, product.nrLeaves());
+  EXPECT(product.labels().empty());
+
+  // Retrieve the single leaf
+  auto leaf = product(Assignment<Key>());
+
+  // Check that the leaf contains both factors
+  EXPECT_LONGS_EQUAL(2, leaf.size());
+  EXPECT(leaf.at(0) == f10);
+  EXPECT(leaf.at(1) == f11);
+}
+
+/* ************************************************************************* */
+// Add two GaussianConditionals and check the resulting tree
+TEST(HybridGaussianProductFactor, AddTwoGaussianConditionals) {
+  // Create two GaussianConditionals
+  Vector1 d(1.0);
+  Matrix11 R = I_1x1, S = I_1x1;
+  auto gc1 = std::make_shared<GaussianConditional>(X(1), d, R, X(2), S);
+  auto gc2 = std::make_shared<GaussianConditional>(X(2), d, R);
+
+  // Create a HybridGaussianProductFactor and add the conditionals
+  HybridGaussianProductFactor product;
+  product += std::static_pointer_cast<GaussianFactor>(gc1);
+  product += std::static_pointer_cast<GaussianFactor>(gc2);
+
+  // Check that the product has only one leaf and no discrete variables
+  EXPECT_LONGS_EQUAL(1, product.nrLeaves());
+  EXPECT(product.labels().empty());
+
+  // Retrieve the single leaf
+  auto leaf = product(Assignment<Key>());
+
+  // Check that the leaf contains both conditionals
+  EXPECT_LONGS_EQUAL(2, leaf.size());
+  EXPECT(leaf.at(0) == gc1);
+  EXPECT(leaf.at(1) == gc2);
+}
+
+/* ************************************************************************* */
+// Check AsProductFactor
+TEST(HybridGaussianProductFactor, AsProductFactor) {
+  using namespace examples;
+  auto product = hybridFactorA.asProductFactor();
+
+  // Let's check that this worked:
+  Assignment<Key> mode;
+  mode[m1.first] = 1;
+  auto actual = product(mode);
+  EXPECT(actual.at(0) == f11);
+}
+
+/* ************************************************************************* */
+// "Add" one hybrid factor to the product.
+TEST(HybridGaussianProductFactor, AddOne) {
+  using namespace examples;
+  HybridGaussianProductFactor product;
+  product += hybridFactorA;
+
+  // Let's check that this worked:
+  Assignment<Key> mode;
+  mode[m1.first] = 1;
+  auto actual = product(mode);
+  EXPECT(actual.at(0) == f11);
+}
+
+/* ************************************************************************* */
+// "Add" two hybrid factors together.
TEST(HybridGaussianProductFactor, AddTwo) {
+  using namespace examples;
+
+  // Create product of two hybrid factors: it will be a decision tree now on
+  // both discrete variables m1 and m2:
+  HybridGaussianProductFactor product;
+  product += hybridFactorA;
+  product += hybridFactorB;
+
+  // Let's check that this worked:
+  auto actual00 = product({{M(1), 0}, {M(2), 0}});
+  EXPECT(actual00.at(0) == f10);
+  EXPECT(actual00.at(1) == f20);
+
+  auto actual12 = product({{M(1), 1}, {M(2), 2}});
+  EXPECT(actual12.at(0) == f11);
+  EXPECT(actual12.at(1) == f22);
+}
+
+/* ************************************************************************* */
+// "Add" a hybrid factor and a pruned hybrid factor together.
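+// Note on the expected leaf counts below: the product over m1 (2 values) and
+// m2 (3 values) has 2 x 3 = 6 leaves. After removeEmpty(), the two leaves
+// containing the nulled-out m2==1 component become identical empty graphs and
+// collapse into a single leaf, leaving 5.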
+TEST(HybridGaussianProductFactor, AddPruned) {
+  using namespace examples;
+
+  // Create product of two hybrid factors: it will be a decision tree now on
+  // both discrete variables m1 and m2:
+  HybridGaussianProductFactor product;
+  product += hybridFactorA;
+  product += prunedFactorB;
+  EXPECT_LONGS_EQUAL(6, product.nrLeaves());
+
+  auto pruned = product.removeEmpty();
+  EXPECT_LONGS_EQUAL(5, pruned.nrLeaves());
+}
+
+/* ************************************************************************* */
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
+/* ************************************************************************* */

From ed9a216365a1087f94bd27882cf0ebdea324c563 Mon Sep 17 00:00:00 2001
From: Frank Dellaert 
Date: Sat, 5 Oct 2024 07:18:19 +0900
Subject: [PATCH 043/204] Switch to using HybridGaussianProductFactor

---
 gtsam/hybrid/HybridFactor.h                   |  3 -
 gtsam/hybrid/HybridGaussianConditional.cpp    |  7 ++-
 gtsam/hybrid/HybridGaussianConditional.h      |  7 ++-
 gtsam/hybrid/HybridGaussianFactor.cpp         | 20 ------
 gtsam/hybrid/HybridGaussianFactor.h           | 28 ---------
 gtsam/hybrid/HybridGaussianFactorGraph.cpp    | 62 +++++--------------
 gtsam/hybrid/HybridGaussianFactorGraph.h      |  2 +-
 .../hybrid/tests/testHybridGaussianFactor.cpp | 37 ++++-------
 .../tests/testHybridGaussianFactorGraph.cpp   | 11 ++--
 9 files changed, 42 insertions(+), 135 deletions(-)

diff --git a/gtsam/hybrid/HybridFactor.h b/gtsam/hybrid/HybridFactor.h
index fc91e0838..8d6fef3f4 100644
--- a/gtsam/hybrid/HybridFactor.h
+++ b/gtsam/hybrid/HybridFactor.h
@@ -32,9 +32,6 @@ namespace gtsam {
 
 class HybridValues;
 
-/// Alias for DecisionTree of GaussianFactorGraphs
-using GaussianFactorGraphTree = DecisionTree<Key, GaussianFactorGraph>;
-
 KeyVector CollectKeys(const KeyVector &continuousKeys,
                       const DiscreteKeys &discreteKeys);
 KeyVector CollectKeys(const KeyVector &keys1, const KeyVector &keys2);
diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 2c0fb28a4..e69b966f8 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -23,6 +23,7 @@
 #include <gtsam/base/utilities.h>
 #include <gtsam/discrete/DiscreteValues.h>
 #include <gtsam/hybrid/HybridGaussianConditional.h>
+#include <gtsam/hybrid/HybridGaussianProductFactor.h>
 #include <gtsam/hybrid/HybridValues.h>
 #include <gtsam/inference/Conditional-inst.h>
 #include <gtsam/linear/GaussianBayesNet.h>
@@ -136,9 +137,9 @@ HybridGaussianConditional::conditionals() const {
 }
 
 /* *******************************************************************************/
-GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree()
+HybridGaussianProductFactor HybridGaussianConditional::asProductFactor()
     const {
-  auto wrap = [this](const GaussianConditional::shared_ptr &gc) {
+  auto wrap = [this](const std::shared_ptr<GaussianConditional> &gc) {
     // First check if conditional has not been pruned
     if (gc) {
       const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_;
@@ -155,7 +156,7 @@
     }
     return GaussianFactorGraph{gc};
   };
-  return {conditionals_, wrap};
+  return {{conditionals_, wrap}};
 }
 
 /* *******************************************************************************/
diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index 27e31d767..3e6574abc 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -24,6 +24,7 @@
 #include <gtsam/discrete/DecisionTreeFactor.h>
 #include <gtsam/discrete/DiscreteKey.h>
 #include <gtsam/hybrid/HybridFactor.h>
+#include <gtsam/hybrid/HybridGaussianProductFactor.h>
 #include <gtsam/inference/Conditional.h>
 #include <gtsam/linear/GaussianConditional.h>
 #include <gtsam/linear/GaussianFactorGraph.h>
@@ -221,6 +222,9 @@ class GTSAM_EXPORT HybridGaussianConditional
   HybridGaussianConditional::shared_ptr prune(
       const DecisionTreeFactor &discreteProbs) const;
 
+  /// Convert to a DecisionTree of Gaussian
factor graphs. + HybridGaussianProductFactor asProductFactor() const override; + /// @} private: @@ -231,9 +235,6 @@ class GTSAM_EXPORT HybridGaussianConditional HybridGaussianConditional(const DiscreteKeys &discreteParents, const Helper &helper); - /// Convert to a DecisionTree of Gaussian factor graphs. - GaussianFactorGraphTree asGaussianFactorGraphTree() const; - /// Check whether `given` has values for all frontal keys. bool allFrontalsGiven(const VectorValues &given) const; diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index aa88ded30..cc72dc0ec 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -195,26 +195,6 @@ HybridGaussianFactor::sharedFactor HybridGaussianFactor::operator()( return factors_(assignment).first; } -/* *******************************************************************************/ -GaussianFactorGraphTree HybridGaussianFactor::add( - const GaussianFactorGraphTree &sum) const { - using Y = GaussianFactorGraph; - auto add = [](const Y &graph1, const Y &graph2) { - auto result = graph1; - result.push_back(graph2); - return result; - }; - const auto tree = asGaussianFactorGraphTree(); - return sum.empty() ? tree : sum.apply(tree, add); -} - -/* *******************************************************************************/ -GaussianFactorGraphTree HybridGaussianFactor::asGaussianFactorGraphTree() - const { - auto wrap = [](const auto &pair) { return GaussianFactorGraph{pair.first}; }; - return {factors_, wrap}; -} - /* *******************************************************************************/ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { return {{factors_, diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index d160798b6..64a9e1918 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -130,16 +130,6 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { /// Get factor at a given discrete assignment. sharedFactor operator()(const DiscreteValues &assignment) const; - /** - * @brief Combine the Gaussian Factor Graphs in `sum` and `this` while - * maintaining the original tree structure. - * - * @param sum Decision Tree of Gaussian Factor Graphs indexed by the - * variables. - * @return Sum - */ - GaussianFactorGraphTree add(const GaussianFactorGraphTree &sum) const; - /** * @brief Compute error of the HybridGaussianFactor as a tree. * @@ -158,14 +148,6 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { /// Getter for GaussianFactor decision tree const FactorValuePairs &factors() const { return factors_; } - - /// Add HybridNonlinearFactor to a Sum, syntactic sugar. - friend GaussianFactorGraphTree &operator+=( - GaussianFactorGraphTree &sum, const HybridGaussianFactor &factor) { - sum = factor.add(sum); - return sum; - } - /** * @brief Helper function to return factors and functional to create a * DecisionTree of Gaussian Factor Graphs. @@ -175,16 +157,6 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { virtual HybridGaussianProductFactor asProductFactor() const; /// @} - protected: - /** - * @brief Helper function to return factors and functional to create a - * DecisionTree of Gaussian Factor Graphs. 
- * - * @return GaussianFactorGraphTree - */ - GaussianFactorGraphTree asGaussianFactorGraphTree() const; - - private: /** * @brief Helper function to augment the [A|b] matrices in the factor diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 957a85038..b58400aa4 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -127,43 +127,28 @@ void HybridGaussianFactorGraph::printErrors( std::cout.flush(); } -/* ************************************************************************ */ -static GaussianFactorGraphTree addGaussian( - const GaussianFactorGraphTree &gfgTree, - const GaussianFactor::shared_ptr &factor) { - // If the decision tree is not initialized, then initialize it. - if (gfgTree.empty()) { - GaussianFactorGraph result{factor}; - return GaussianFactorGraphTree(result); - } else { - auto add = [&factor](const GaussianFactorGraph &graph) { - auto result = graph; - result.push_back(factor); - return result; - }; - return gfgTree.apply(add); - } -} - /* ************************************************************************ */ // TODO(dellaert): it's probably more efficient to first collect the discrete // keys, and then loop over all assignments to populate a vector. -GaussianFactorGraphTree HybridGaussianFactorGraph::assembleGraphTree() const { - GaussianFactorGraphTree result; +HybridGaussianProductFactor +HybridGaussianFactorGraph::collectProductFactor() const { + HybridGaussianProductFactor result; for (auto &f : factors_) { - // TODO(dellaert): just use a virtual method defined in HybridFactor. + // TODO(dellaert): can we make this cleaner and less error-prone? if (auto gf = dynamic_pointer_cast(f)) { - result = addGaussian(result, gf); + result += gf; + } else if (auto gc = dynamic_pointer_cast(f)) { + result += gc; } else if (auto gmf = dynamic_pointer_cast(f)) { - result = gmf->add(result); + result += *gmf; } else if (auto gm = dynamic_pointer_cast(f)) { - result = gm->add(result); + result += *gm; // handled above already? } else if (auto hc = dynamic_pointer_cast(f)) { if (auto gm = hc->asHybrid()) { - result = gm->add(result); + result += *gm; } else if (auto g = hc->asGaussian()) { - result = addGaussian(result, g); + result += g; } else { // Has to be discrete. // TODO(dellaert): in C++20, we can use std::visit. @@ -176,7 +161,7 @@ GaussianFactorGraphTree HybridGaussianFactorGraph::assembleGraphTree() const { } else { // TODO(dellaert): there was an unattributed comment here: We need to // handle the case where the object is actually an BayesTreeOrphanWrapper! - throwRuntimeError("gtsam::assembleGraphTree", f); + throwRuntimeError("gtsam::collectProductFactor", f); } } @@ -269,21 +254,6 @@ discreteElimination(const HybridGaussianFactorGraph &factors, return {std::make_shared(result.first), result.second}; } -/* ************************************************************************ */ -// If any GaussianFactorGraph in the decision tree contains a nullptr, convert -// that leaf to an empty GaussianFactorGraph. Needed since the DecisionTree will -// otherwise create a GFG with a single (null) factor, -// which doesn't register as null. -GaussianFactorGraphTree removeEmpty(const GaussianFactorGraphTree &sum) { - auto emptyGaussian = [](const GaussianFactorGraph &graph) { - bool hasNull = - std::any_of(graph.begin(), graph.end(), - [](const GaussianFactor::shared_ptr &ptr) { return !ptr; }); - return hasNull ? 
GaussianFactorGraph() : graph; - }; - return GaussianFactorGraphTree(sum, emptyGaussian); -} - /* ************************************************************************ */ using Result = std::pair, HybridGaussianFactor::sharedFactor>; @@ -334,7 +304,7 @@ static std::shared_ptr createHybridGaussianFactor( // and negative since we want log(k) hf->constantTerm() += -2.0 * conditional->negLogConstant(); } - return {factor, 0.0}; + return {factor, conditional->negLogConstant()}; }; DecisionTree newFactors(eliminationResults, correct); @@ -359,12 +329,12 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // Collect all the factors to create a set of Gaussian factor graphs in a // decision tree indexed by all discrete keys involved. - GaussianFactorGraphTree factorGraphTree = assembleGraphTree(); + HybridGaussianProductFactor productFactor = collectProductFactor(); // Convert factor graphs with a nullptr to an empty factor graph. // This is done after assembly since it is non-trivial to keep track of which // FG has a nullptr as we're looping over the factors. - factorGraphTree = removeEmpty(factorGraphTree); + auto prunedProductFactor = productFactor.removeEmpty(); // This is the elimination method on the leaf nodes bool someContinuousLeft = false; @@ -383,7 +353,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { }; // Perform elimination! - DecisionTree eliminationResults(factorGraphTree, eliminate); + DecisionTree eliminationResults(prunedProductFactor, eliminate); // If there are no more continuous parents we create a DiscreteFactor with the // error for each discrete choice. Otherwise, create a HybridGaussianFactor diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h index a5130ca08..ad6d6337d 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.h +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -218,7 +218,7 @@ class GTSAM_EXPORT HybridGaussianFactorGraph * one for A and one for B. The leaves of the tree will be the Gaussian * factors that have only continuous keys. */ - GaussianFactorGraphTree assembleGraphTree() const; + HybridGaussianProductFactor collectProductFactor() const; /** * @brief Eliminate the given continuous keys. diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index 5ff8c1478..1c9476eeb 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -82,40 +82,25 @@ TEST(HybridGaussianFactor, ConstructorVariants) { } /* ************************************************************************* */ -// "Add" two hybrid factors together. -TEST(HybridGaussianFactor, Sum) { +TEST(HybridGaussianFactor, Keys) { using namespace test_constructor; - DiscreteKey m2(2, 3); - - auto A3 = Matrix::Zero(2, 3); - auto f20 = std::make_shared(X(1), A1, X(3), A3, b); - auto f21 = std::make_shared(X(1), A1, X(3), A3, b); - auto f22 = std::make_shared(X(1), A1, X(3), A3, b); - - // TODO(Frank): why specify keys at all? And: keys in factor should be *all* - // keys, deviating from Kevin's scheme. Should we index DT on DiscreteKey? - // Design review! 
HybridGaussianFactor hybridFactorA(m1, {f10, f11}); - HybridGaussianFactor hybridFactorB(m2, {f20, f21, f22}); - // Check the number of keys matches what we expect EXPECT_LONGS_EQUAL(3, hybridFactorA.keys().size()); EXPECT_LONGS_EQUAL(2, hybridFactorA.continuousKeys().size()); EXPECT_LONGS_EQUAL(1, hybridFactorA.discreteKeys().size()); - // Create sum of two hybrid factors: it will be a decision tree now on both - // discrete variables m1 and m2: - GaussianFactorGraphTree sum; - sum += hybridFactorA; - sum += hybridFactorB; + DiscreteKey m2(2, 3); + auto A3 = Matrix::Zero(2, 3); + auto f20 = std::make_shared(X(1), A1, X(3), A3, b); + auto f21 = std::make_shared(X(1), A1, X(3), A3, b); + auto f22 = std::make_shared(X(1), A1, X(3), A3, b); + HybridGaussianFactor hybridFactorB(m2, {f20, f21, f22}); - // Let's check that this worked: - Assignment mode; - mode[m1.first] = 1; - mode[m2.first] = 2; - auto actual = sum(mode); - EXPECT(actual.at(0) == f11); - EXPECT(actual.at(1) == f22); + // Check the number of keys matches what we expect + EXPECT_LONGS_EQUAL(3, hybridFactorB.keys().size()); + EXPECT_LONGS_EQUAL(2, hybridFactorB.continuousKeys().size()); + EXPECT_LONGS_EQUAL(1, hybridFactorB.discreteKeys().size()); } /* ************************************************************************* */ diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index f30085f02..4c88fa8cd 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -674,14 +675,14 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { /* ****************************************************************************/ // Check that assembleGraphTree assembles Gaussian factor graphs for each // assignment. 
-TEST(HybridGaussianFactorGraph, assembleGraphTree) {
+TEST(HybridGaussianFactorGraph, collectProductFactor) {
   const int num_measurements = 1;
   auto fg = tiny::createHybridGaussianFactorGraph(
       num_measurements, VectorValues{{Z(0), Vector1(5.0)}});
   EXPECT_LONGS_EQUAL(3, fg.size());
 
   // Assemble graph tree:
-  auto actual = fg.assembleGraphTree();
+  auto actual = fg.collectProductFactor();
 
   // Create expected decision tree with two factor graphs:
 
@@ -701,9 +702,9 @@
 
   // Expected decision tree with two factor graphs:
   // f(x0;mode=0)P(x0) and f(x0;mode=1)P(x0)
-  GaussianFactorGraphTree expected{
-      M(0), GaussianFactorGraph(std::vector<GaussianFactor::shared_ptr>{(*hybrid)(d0), prior}),
-      GaussianFactorGraph(std::vector<GaussianFactor::shared_ptr>{(*hybrid)(d1), prior})};
+  HybridGaussianProductFactor expected{
+      {M(0), GaussianFactorGraph(std::vector<GaussianFactor::shared_ptr>{(*hybrid)(d0), prior}),
+       GaussianFactorGraph(std::vector<GaussianFactor::shared_ptr>{(*hybrid)(d1), prior})}};
 
   EXPECT(assert_equal(expected(d0), actual(d0), 1e-5));
   EXPECT(assert_equal(expected(d1), actual(d1), 1e-5));

From 55ca557b1ebdd74e6ec30f99b3acebca7c11c4e2 Mon Sep 17 00:00:00 2001
From: Frank Dellaert 
Date: Sat, 5 Oct 2024 19:00:39 +0900
Subject: [PATCH 044/204] Fix conditional==null bug

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 37 ++++++++++++++--------
 1 file changed, 24 insertions(+), 13 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index b58400aa4..a46493a32 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -40,7 +40,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -269,15 +270,20 @@ static std::shared_ptr<Factor> createDiscreteFactor(
     const DiscreteKeys &discreteSeparator) {
   auto negLogProbability = [&](const Result &pair) -> double {
     const auto &[conditional, factor] = pair;
-    static const VectorValues kEmpty;
-    // If the factor is not null, it has no keys, just contains the residual.
-    if (!factor) return 1.0;  // TODO(dellaert): not loving this.
+    if (conditional && factor) {
+      static const VectorValues kEmpty;
+      // If the factor is not null, it has no keys, just contains the residual.
 
-    // Negative logspace version of:
-    // exp(-factor->error(kEmpty)) / conditional->normalizationConstant();
-    // negLogConstant gives `-log(k)`
-    // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`.
-    return factor->error(kEmpty) - conditional->negLogConstant();
+      // Negative-log-space version of:
+      // exp(-factor->error(kEmpty)) / conditional->normalizationConstant();
+      // negLogConstant gives `-log(k)`
+      // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`.
+      return factor->error(kEmpty) - conditional->negLogConstant();
+    } else if (!conditional && !factor) {
+      return 1.0;  // TODO(dellaert): not loving this, what should this be??
+    } else {
+      throw std::runtime_error("createDiscreteFactor has mixed NULLs");
+    }
   };
 
   AlgebraicDecisionTree<Key> negLogProbabilities(
@@ -296,15 +302,20 @@ static std::shared_ptr<Factor> createHybridGaussianFactor(
   // Correct for the normalization constant used up by the conditional
   auto correct = [&](const Result &pair) -> GaussianFactorValuePair {
     const auto &[conditional, factor] = pair;
-    if (factor) {
+    if (conditional && factor) {
       auto hf = std::dynamic_pointer_cast<HessianFactor>(factor);
       if (!hf) throw std::runtime_error("Expected HessianFactor!");
       // Add 2.0 term since the constant term will be premultiplied by 0.5
       // as per the Hessian definition,
       // and negative since we want log(k)
-      hf->constantTerm() += -2.0 * conditional->negLogConstant();
+      const double negLogK = conditional->negLogConstant();
+      hf->constantTerm() += -2.0 * negLogK;
+      return {factor, negLogK};
+    } else if (!conditional && !factor) {
+      return {nullptr, 0.0};  // TODO(frank): or should this be infinity?
+    } else {
+      throw std::runtime_error("createHybridGaussianFactor has mixed NULLs");
     }
-    return {factor, conditional->negLogConstant()};
   };
 
   DecisionTree<Key, GaussianFactorValuePair> newFactors(eliminationResults, correct);

From 92540298e1068b7f0fa2a5694629a8da4cd27bd5 Mon Sep 17 00:00:00 2001
From: Frank Dellaert 
Date: Sun, 6 Oct 2024 15:12:03 +0900
Subject: [PATCH 045/204] Squashed commit

Revised asProductFactor methods
collectProductFactor() method
Move test
better print
Formatting
Efficiency
Fix several bugs
Fix print methods
Fix print methods
More tests, BT tests in different file
More product tests
Disable printing tests
Minimize diff
Fix rebase issue

---
 gtsam/hybrid/HybridGaussianConditional.cpp    |   3 -
 gtsam/hybrid/HybridGaussianFactor.cpp         |  46 +-
 gtsam/hybrid/HybridGaussianFactor.h           |  20 +-
 gtsam/hybrid/HybridGaussianFactorGraph.cpp    |  59 ++-
 gtsam/hybrid/HybridGaussianFactorGraph.h      |   7 +-
 gtsam/hybrid/HybridNonlinearFactor.cpp        |   4 +-
 gtsam/hybrid/tests/testHybridBayesTree.cpp    | 323 +++++++++++++-
 .../hybrid/tests/testHybridGaussianFactor.cpp |   4 +-
 .../tests/testHybridGaussianFactorGraph.cpp   | 393 +++---------------
 .../tests/testHybridGaussianProductFactor.cpp |   1 -
 .../tests/testHybridNonlinearFactorGraph.cpp  |  70 ++--
 11 files changed, 509 insertions(+), 421 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index e69b966f8..a42485f10 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -203,9 +203,6 @@ bool HybridGaussianConditional::equals(const HybridFactor &lf,
 void HybridGaussianConditional::print(const std::string &s,
                                       const KeyFormatter &formatter) const {
   std::cout << (s.empty() ?
"" : s + "\n"); - if (isContinuous()) std::cout << "Continuous "; - if (isDiscrete()) std::cout << "Discrete "; - if (isHybrid()) std::cout << "Hybrid "; BaseConditional::print("", formatter); std::cout << " Discrete Keys = "; for (auto &dk : discreteKeys()) { diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index cc72dc0ec..acdd9ef89 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -32,8 +32,8 @@ namespace gtsam { /* *******************************************************************************/ -HybridGaussianFactor::FactorValuePairs HybridGaussianFactor::augment( - const FactorValuePairs &factors) { +HybridGaussianFactor::FactorValuePairs +HybridGaussianFactor::augment(const FactorValuePairs &factors) { // Find the minimum value so we can "proselytize" to positive values. // Done because we can't have sqrt of negative numbers. DecisionTree gaussianFactors; @@ -48,12 +48,14 @@ HybridGaussianFactor::FactorValuePairs HybridGaussianFactor::augment( auto [gf, value] = gfv; auto jf = std::dynamic_pointer_cast(gf); - if (!jf) return {gf, 0.0}; // should this be zero or infinite? + if (!jf) + return {gf, 0.0}; // should this be zero or infinite? double normalized_value = value - min_value; // If the value is 0, do nothing - if (normalized_value == 0.0) return {gf, 0.0}; + if (normalized_value == 0.0) + return {gf, 0.0}; GaussianFactorGraph gfg; gfg.push_back(jf); @@ -71,9 +73,9 @@ HybridGaussianFactor::FactorValuePairs HybridGaussianFactor::augment( /* *******************************************************************************/ struct HybridGaussianFactor::ConstructorHelper { - KeyVector continuousKeys; // Continuous keys extracted from factors - DiscreteKeys discreteKeys; // Discrete keys provided to the constructors - FactorValuePairs pairs; // The decision tree with factors and scalars + KeyVector continuousKeys; // Continuous keys extracted from factors + DiscreteKeys discreteKeys; // Discrete keys provided to the constructors + FactorValuePairs pairs; // The decision tree with factors and scalars ConstructorHelper(const DiscreteKey &discreteKey, const std::vector &factors) @@ -88,7 +90,9 @@ struct HybridGaussianFactor::ConstructorHelper { // Build the FactorValuePairs DecisionTree pairs = FactorValuePairs( DecisionTree(discreteKeys, factors), - [](const auto &f) { return std::pair{f, 0.0}; }); + [](const auto &f) { + return std::pair{f, 0.0}; + }); } ConstructorHelper(const DiscreteKey &discreteKey, @@ -147,15 +151,17 @@ HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys &discreteKeys, /* *******************************************************************************/ bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { const This *e = dynamic_cast(&lf); - if (e == nullptr) return false; + if (e == nullptr) + return false; // This will return false if either factors_ is empty or e->factors_ is // empty, but not if both are empty or both are not empty: - if (factors_.empty() ^ e->factors_.empty()) return false; + if (factors_.empty() ^ e->factors_.empty()) + return false; // Check the base and the factors: auto compareFunc = [tol](const auto &pair1, const auto &pair2) { - auto f1 = pair1.first, f2 = pair2.first; + auto f1 = pair1.first, f2 = pair2.first; bool match = (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); return match && gtsam::equal(pair1.second, pair2.second, tol); }; @@ -166,7 +172,6 @@ bool HybridGaussianFactor::equals(const HybridFactor &lf, 
double tol) const { void HybridGaussianFactor::print(const std::string &s, const KeyFormatter &formatter) const { std::cout << (s.empty() ? "" : s + "\n"); - std::cout << "HybridGaussianFactor" << std::endl; HybridFactor::print("", formatter); std::cout << "{\n"; if (factors_.empty()) { @@ -179,19 +184,19 @@ void HybridGaussianFactor::print(const std::string &s, std::cout << ":\n"; if (pair.first) { pair.first->print("", formatter); + std::cout << "scalar: " << pair.second << "\n"; return rd.str(); } else { return "nullptr"; } - std::cout << "scalar: " << pair.second << "\n"; }); } std::cout << "}" << std::endl; } /* *******************************************************************************/ -HybridGaussianFactor::sharedFactor HybridGaussianFactor::operator()( - const DiscreteValues &assignment) const { +HybridGaussianFactor::sharedFactor +HybridGaussianFactor::operator()(const DiscreteValues &assignment) const { return factors_(assignment).first; } @@ -203,8 +208,9 @@ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { /* *******************************************************************************/ /// Helper method to compute the error of a component. -static double PotentiallyPrunedComponentError( - const GaussianFactor::shared_ptr &gf, const VectorValues &values) { +static double +PotentiallyPrunedComponentError(const GaussianFactor::shared_ptr &gf, + const VectorValues &values) { // Check if valid pointer if (gf) { return gf->error(values); @@ -216,8 +222,8 @@ static double PotentiallyPrunedComponentError( } /* *******************************************************************************/ -AlgebraicDecisionTree HybridGaussianFactor::errorTree( - const VectorValues &continuousValues) const { +AlgebraicDecisionTree +HybridGaussianFactor::errorTree(const VectorValues &continuousValues) const { // functor to convert from sharedFactor to double error value. auto errorFunc = [&continuousValues](const auto &pair) { return PotentiallyPrunedComponentError(pair.first, continuousValues); @@ -233,4 +239,4 @@ double HybridGaussianFactor::error(const HybridValues &values) const { return PotentiallyPrunedComponentError(pair.first, values.continuous()); } -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index 64a9e1918..bb4d3b368 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -58,7 +58,7 @@ using GaussianFactorValuePair = std::pair; * @ingroup hybrid */ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { - public: +public: using Base = HybridFactor; using This = HybridGaussianFactor; using shared_ptr = std::shared_ptr; @@ -68,11 +68,11 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { /// typedef for Decision Tree of Gaussian factors and arbitrary value. using FactorValuePairs = DecisionTree; - private: +private: /// Decision tree of Gaussian factors indexed by discrete keys. 
FactorValuePairs factors_; - public: +public: /// @name Constructors /// @{ @@ -120,8 +120,9 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { bool equals(const HybridFactor &lf, double tol = 1e-9) const override; - void print(const std::string &s = "", const KeyFormatter &formatter = - DefaultKeyFormatter) const override; + void + print(const std::string &s = "", + const KeyFormatter &formatter = DefaultKeyFormatter) const override; /// @} /// @name Standard API @@ -137,8 +138,8 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { * @return AlgebraicDecisionTree A decision tree with the same keys * as the factors involved, and leaf values as the error. */ - AlgebraicDecisionTree errorTree( - const VectorValues &continuousValues) const override; + AlgebraicDecisionTree + errorTree(const VectorValues &continuousValues) const override; /** * @brief Compute the log-likelihood, including the log-normalizing constant. @@ -148,13 +149,14 @@ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { /// Getter for GaussianFactor decision tree const FactorValuePairs &factors() const { return factors_; } - /** + /** * @brief Helper function to return factors and functional to create a * DecisionTree of Gaussian Factor Graphs. * * @return HybridGaussianProductFactor */ virtual HybridGaussianProductFactor asProductFactor() const; + /// @} private: @@ -189,4 +191,4 @@ private: template <> struct traits : public Testable {}; -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index a46493a32..5d2b4c203 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -18,6 +18,7 @@ * @date Mar 11, 2022 */ +#include "gtsam/discrete/DiscreteValues.h" #include #include #include @@ -78,10 +79,14 @@ static void printFactor(const std::shared_ptr &factor, const DiscreteValues &assignment, const KeyFormatter &keyFormatter) { if (auto hgf = std::dynamic_pointer_cast(factor)) { - hgf->operator()(assignment) - ->print("HybridGaussianFactor, component:", keyFormatter); + if (assignment.empty()) + hgf->print("HybridGaussianFactor:", keyFormatter); + else + hgf->operator()(assignment) + ->print("HybridGaussianFactor, component:", keyFormatter); } else if (auto gf = std::dynamic_pointer_cast(factor)) { factor->print("GaussianFactor:\n", keyFormatter); + } else if (auto df = std::dynamic_pointer_cast(factor)) { factor->print("DiscreteFactor:\n", keyFormatter); } else if (auto hc = std::dynamic_pointer_cast(factor)) { @@ -90,15 +95,38 @@ static void printFactor(const std::shared_ptr &factor, } else if (hc->isDiscrete()) { factor->print("DiscreteConditional:\n", keyFormatter); } else { - hc->asHybrid() - ->choose(assignment) - ->print("HybridConditional, component:\n", keyFormatter); + if (assignment.empty()) + hc->print("HybridConditional:", keyFormatter); + else + hc->asHybrid() + ->choose(assignment) + ->print("HybridConditional, component:\n", keyFormatter); } } else { factor->print("Unknown factor type\n", keyFormatter); } } +/* ************************************************************************ */ +void HybridGaussianFactorGraph::print(const std::string &s, + const KeyFormatter &keyFormatter) const { + std::cout << (s.empty() ? 
"" : s + " ") << std::endl; + std::cout << "size: " << size() << std::endl; + + for (size_t i = 0; i < factors_.size(); i++) { + auto &&factor = factors_[i]; + if (factor == nullptr) { + std::cout << "Factor " << i << ": nullptr\n"; + continue; + } + // Print the factor + std::cout << "Factor " << i << "\n"; + printFactor(factor, {}, keyFormatter); + std::cout << "\n"; + } + std::cout.flush(); +} + /* ************************************************************************ */ void HybridGaussianFactorGraph::printErrors( const HybridValues &values, const std::string &str, @@ -106,7 +134,7 @@ void HybridGaussianFactorGraph::printErrors( const std::function &printCondition) const { - std::cout << str << "size: " << size() << std::endl << std::endl; + std::cout << str << " size: " << size() << std::endl << std::endl; for (size_t i = 0; i < factors_.size(); i++) { auto &&factor = factors_[i]; @@ -267,7 +295,7 @@ using Result = std::pair, */ static std::shared_ptr createDiscreteFactor( const DecisionTree &eliminationResults, - const DiscreteKeys &discreteSeparator) { + const DiscreteKeys &discreteSeparator) { auto negLogProbability = [&](const Result &pair) -> double { const auto &[conditional, factor] = pair; if (conditional && factor) { @@ -298,7 +326,7 @@ static std::shared_ptr createDiscreteFactor( // for conditional constants. static std::shared_ptr createHybridGaussianFactor( const DecisionTree &eliminationResults, - const DiscreteKeys &discreteSeparator) { + const DiscreteKeys &discreteSeparator) { // Correct for the normalization constant used up by the conditional auto correct = [&](const Result &pair) -> GaussianFactorValuePair { const auto &[conditional, factor] = pair; @@ -459,6 +487,7 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, } else if (hybrid_factor->isHybrid()) { only_continuous = false; only_discrete = false; + break; } } else if (auto cont_factor = std::dynamic_pointer_cast(factor)) { @@ -495,10 +524,11 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( } else if (auto df = std::dynamic_pointer_cast(factor)) { // If discrete, just add its errorTree as well result = result + df->errorTree(); + } else if (auto gf = std::dynamic_pointer_cast(factor)) { + // For a continuous only factor, just add its error + result = result + gf->error(continuousValues); } else { - // Everything else is a continuous only factor - HybridValues hv(continuousValues, DiscreteValues()); - result = result + factor->error(hv); // NOTE: yes, you can add constants + throwRuntimeError("HybridGaussianFactorGraph::errorTree", factor); } } return result; @@ -533,8 +563,13 @@ GaussianFactorGraph HybridGaussianFactorGraph::choose( gfg.push_back(gf); } else if (auto hgf = std::dynamic_pointer_cast(f)) { gfg.push_back((*hgf)(assignment)); - } else if (auto hgc = dynamic_pointer_cast(f)) { + } else if (auto hgc = std::dynamic_pointer_cast(f)) { gfg.push_back((*hgc)(assignment)); + } else if (auto hc = std::dynamic_pointer_cast(f)) { + if (auto gc = hc->asGaussian()) + gfg.push_back(gc); + else if (auto hgc = hc->asHybrid()) + gfg.push_back((*hgc)(assignment)); } else { continue; } diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h index ad6d6337d..d72e81489 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.h +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -145,10 +145,9 @@ class GTSAM_EXPORT HybridGaussianFactorGraph /// @name Testable /// @{ - // TODO(dellaert): customize print and equals. 
- // void print( - // const std::string& s = "HybridGaussianFactorGraph", - // const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; + void + print(const std::string &s = "HybridGaussianFactorGraph", + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; /** * @brief Print the errors of each factor in the hybrid factor graph. diff --git a/gtsam/hybrid/HybridNonlinearFactor.cpp b/gtsam/hybrid/HybridNonlinearFactor.cpp index 56711b313..9378d07fe 100644 --- a/gtsam/hybrid/HybridNonlinearFactor.cpp +++ b/gtsam/hybrid/HybridNonlinearFactor.cpp @@ -196,8 +196,8 @@ std::shared_ptr HybridNonlinearFactor::linearize( } }; - HybridGaussianFactor::FactorValuePairs linearized_factors(factors_, - linearizeDT); + DecisionTree> + linearized_factors(factors_, linearizeDT); return std::make_shared(discreteKeys_, linearized_factors); diff --git a/gtsam/hybrid/tests/testHybridBayesTree.cpp b/gtsam/hybrid/tests/testHybridBayesTree.cpp index 81b257c32..f31846cb3 100644 --- a/gtsam/hybrid/tests/testHybridBayesTree.cpp +++ b/gtsam/hybrid/tests/testHybridBayesTree.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include "Switching.h" @@ -28,9 +29,319 @@ using namespace std; using namespace gtsam; -using noiseModel::Isotropic; +using symbol_shorthand::D; using symbol_shorthand::M; using symbol_shorthand::X; +using symbol_shorthand::Y; + +static const DiscreteKey m0(M(0), 2), m1(M(1), 2), m2(M(2), 2), m3(M(3), 2); + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { + // Test multifrontal elimination + HybridGaussianFactorGraph hfg; + + // Add priors on x0 and c1 + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + hfg.add(DecisionTreeFactor(m1, {2, 8})); + + Ordering ordering; + ordering.push_back(X(0)); + auto result = hfg.eliminatePartialMultifrontal(ordering); + + EXPECT_LONGS_EQUAL(result.first->size(), 1); + EXPECT_LONGS_EQUAL(result.second->size(), 1); +} + +/* ************************************************************************* */ +namespace two { +std::vector components(Key key) { + return {std::make_shared(key, I_3x3, Z_3x1), + std::make_shared(key, I_3x3, Vector3::Ones())}; +} +} // namespace two + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { + HybridGaussianFactorGraph hfg; + + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); + + hfg.add(DecisionTreeFactor(m1, {2, 8})); + // TODO(Varun) Adding extra discrete variable not connected to continuous + // variable throws segfault + // hfg.add(DecisionTreeFactor({m1, m2, "1 2 3 4")); + + HybridBayesTree::shared_ptr result = hfg.eliminateMultifrontal(); + + // The bayes tree should have 3 cliques + EXPECT_LONGS_EQUAL(3, result->size()); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { + HybridGaussianFactorGraph hfg; + + // Prior on x0 + hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); + // Factor between x0-x1 + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + + // Hybrid factor P(x1|c1) + hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); + // Prior factor on c1 + hfg.add(DecisionTreeFactor(m1, {2, 8})); + + // Get a constrained ordering keeping c1 last + auto ordering_full = 
HybridOrdering(hfg); + + // Returns a Hybrid Bayes Tree with distribution P(x0|x1)P(x1|c1)P(c1) + HybridBayesTree::shared_ptr hbt = hfg.eliminateMultifrontal(ordering_full); + + EXPECT_LONGS_EQUAL(3, hbt->size()); +} + +/* ************************************************************************* */ +// Check assembling the Bayes Tree roots after we do partial elimination +TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { + HybridGaussianFactorGraph hfg; + + hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(1), I_3x3, X(2), -I_3x3, Z_3x1)); + + hfg.add(HybridGaussianFactor(m0, two::components(X(0)))); + hfg.add(HybridGaussianFactor(m1, two::components(X(2)))); + + hfg.add(DecisionTreeFactor({m1, m2}, "1 2 3 4")); + + hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1)); + hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1)); + + hfg.add(HybridGaussianFactor(m3, two::components(X(3)))); + hfg.add(HybridGaussianFactor(m2, two::components(X(5)))); + + auto ordering_full = + Ordering::ColamdConstrainedLast(hfg, {M(0), M(1), M(2), M(3)}); + + const auto [hbt, remaining] = hfg.eliminatePartialMultifrontal(ordering_full); + + // 9 cliques in the bayes tree and 0 remaining variables to eliminate. + EXPECT_LONGS_EQUAL(9, hbt->size()); + EXPECT_LONGS_EQUAL(0, remaining->size()); + + /* + (Fan) Explanation: the Junction tree will need to re-eliminate to get to the + marginal on X(1), which is not possible because it involves eliminating + discrete before continuous. The solution to this, however, is in Murphy02. + TLDR is that this is 1. expensive and 2. inexact. nevertheless it is doable. + And I believe that we should do this. + */ +} + +/* ************************************************************************* */ +void dotPrint(const HybridGaussianFactorGraph::shared_ptr &hfg, + const HybridBayesTree::shared_ptr &hbt, + const Ordering &ordering) { + DotWriter dw; + dw.positionHints['c'] = 2; + dw.positionHints['x'] = 1; + std::cout << hfg->dot(DefaultKeyFormatter, dw); + std::cout << "\n"; + hbt->dot(std::cout); + + std::cout << "\n"; + std::cout << hfg->eliminateSequential(ordering)->dot(DefaultKeyFormatter, dw); +} + +/* ************************************************************************* */ +// TODO(fan): make a graph like Varun's paper one +TEST(HybridGaussianFactorGraph, Switching) { + auto N = 12; + auto hfg = makeSwitchingChain(N); + + // X(5) will be the center, X(1-4), X(6-9) + // X(3), X(7) + // X(2), X(8) + // X(1), X(4), X(6), X(9) + // M(5) will be the center, M(1-4), M(6-8) + // M(3), M(7) + // M(1), M(4), M(2), M(6), M(8) + // auto ordering_full = + // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), + // X(5), + // M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)}); + KeyVector ordering; + + { + std::vector naturalX(N); + std::iota(naturalX.begin(), naturalX.end(), 1); + std::vector ordX; + std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), + [](int x) { return X(x); }); + + auto [ndX, lvls] = makeBinaryOrdering(ordX); + std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + // TODO(dellaert): this has no effect! 
+ for (auto &l : lvls) { + l = -l; + } + } + { + std::vector naturalC(N - 1); + std::iota(naturalC.begin(), naturalC.end(), 1); + std::vector ordC; + std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), + [](int x) { return M(x); }); + + // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); + const auto [ndC, lvls] = makeBinaryOrdering(ordC); + std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); + } + auto ordering_full = Ordering(ordering); + + const auto [hbt, remaining] = + hfg->eliminatePartialMultifrontal(ordering_full); + + // 12 cliques in the bayes tree and 0 remaining variables to eliminate. + EXPECT_LONGS_EQUAL(12, hbt->size()); + EXPECT_LONGS_EQUAL(0, remaining->size()); +} + +/* ************************************************************************* */ +// TODO(fan): make a graph like Varun's paper one +TEST(HybridGaussianFactorGraph, SwitchingISAM) { + auto N = 11; + auto hfg = makeSwitchingChain(N); + + // X(5) will be the center, X(1-4), X(6-9) + // X(3), X(7) + // X(2), X(8) + // X(1), X(4), X(6), X(9) + // M(5) will be the center, M(1-4), M(6-8) + // M(3), M(7) + // M(1), M(4), M(2), M(6), M(8) + // auto ordering_full = + // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), + // X(5), + // M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)}); + KeyVector ordering; + + { + std::vector naturalX(N); + std::iota(naturalX.begin(), naturalX.end(), 1); + std::vector ordX; + std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), + [](int x) { return X(x); }); + + auto [ndX, lvls] = makeBinaryOrdering(ordX); + std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); + // TODO(dellaert): this has no effect! + for (auto &l : lvls) { + l = -l; + } + } + { + std::vector naturalC(N - 1); + std::iota(naturalC.begin(), naturalC.end(), 1); + std::vector ordC; + std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), + [](int x) { return M(x); }); + + // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); + const auto [ndC, lvls] = makeBinaryOrdering(ordC); + std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); + } + auto ordering_full = Ordering(ordering); + + const auto [hbt, remaining] = + hfg->eliminatePartialMultifrontal(ordering_full); + + auto new_fg = makeSwitchingChain(12); + auto isam = HybridGaussianISAM(*hbt); + + // Run an ISAM update. 
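+  // (The two newest factors of the extended chain are pushed below; the
+  // previously eliminated Bayes tree is reused, which is the point of the
+  // incremental update.)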
+ HybridGaussianFactorGraph factorGraph; + factorGraph.push_back(new_fg->at(new_fg->size() - 2)); + factorGraph.push_back(new_fg->at(new_fg->size() - 1)); + isam.update(factorGraph); + + // ISAM should have 12 factors after the last update + EXPECT_LONGS_EQUAL(12, isam.size()); +} + +/* ************************************************************************* */ +TEST(HybridGaussianFactorGraph, SwitchingTwoVar) { + const int N = 7; + auto hfg = makeSwitchingChain(N, X); + hfg->push_back(*makeSwitchingChain(N, Y, D)); + + for (int t = 1; t <= N; t++) { + hfg->add(JacobianFactor(X(t), I_3x3, Y(t), -I_3x3, Vector3(1.0, 0.0, 0.0))); + } + + KeyVector ordering; + + KeyVector naturalX(N); + std::iota(naturalX.begin(), naturalX.end(), 1); + KeyVector ordX; + for (size_t i = 1; i <= N; i++) { + ordX.emplace_back(X(i)); + ordX.emplace_back(Y(i)); + } + + for (size_t i = 1; i <= N - 1; i++) { + ordX.emplace_back(M(i)); + } + for (size_t i = 1; i <= N - 1; i++) { + ordX.emplace_back(D(i)); + } + + { + DotWriter dw; + dw.positionHints['x'] = 1; + dw.positionHints['c'] = 0; + dw.positionHints['d'] = 3; + dw.positionHints['y'] = 2; + // std::cout << hfg->dot(DefaultKeyFormatter, dw); + // std::cout << "\n"; + } + + { + DotWriter dw; + dw.positionHints['y'] = 9; + // dw.positionHints['c'] = 0; + // dw.positionHints['d'] = 3; + dw.positionHints['x'] = 1; + // std::cout << "\n"; + // std::cout << hfg->eliminateSequential(Ordering(ordX)) + // ->dot(DefaultKeyFormatter, dw); + // hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout); + } + + Ordering ordering_partial; + for (size_t i = 1; i <= N; i++) { + ordering_partial.emplace_back(X(i)); + ordering_partial.emplace_back(Y(i)); + } + const auto [hbn, remaining] = + hfg->eliminatePartialSequential(ordering_partial); + + EXPECT_LONGS_EQUAL(14, hbn->size()); + EXPECT_LONGS_EQUAL(11, remaining->size()); + + { + DotWriter dw; + dw.positionHints['x'] = 1; + dw.positionHints['c'] = 0; + dw.positionHints['d'] = 3; + dw.positionHints['y'] = 2; + // std::cout << remaining->dot(DefaultKeyFormatter, dw); + // std::cout << "\n"; + } +} /* ****************************************************************************/ // Test multifrontal optimize @@ -97,7 +408,8 @@ TEST(HybridBayesTree, OptimizeAssignment) { // Create ordering. Ordering ordering; - for (size_t k = 0; k < s.K; k++) ordering.push_back(X(k)); + for (size_t k = 0; k < s.K; k++) + ordering.push_back(X(k)); const auto [hybridBayesNet, remainingFactorGraph] = s.linearizedFactorGraph.eliminatePartialSequential(ordering); @@ -139,20 +451,21 @@ TEST(HybridBayesTree, Optimize) { // Create ordering. 
Ordering ordering; - for (size_t k = 0; k < s.K; k++) ordering.push_back(X(k)); + for (size_t k = 0; k < s.K; k++) + ordering.push_back(X(k)); const auto [hybridBayesNet, remainingFactorGraph] = s.linearizedFactorGraph.eliminatePartialSequential(ordering); DiscreteFactorGraph dfg; - for (auto&& f : *remainingFactorGraph) { + for (auto &&f : *remainingFactorGraph) { auto discreteFactor = dynamic_pointer_cast(f); assert(discreteFactor); dfg.push_back(discreteFactor); } // Add the probabilities for each branch - DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}, {M(2), 2}}; + DiscreteKeys discrete_keys = {m0, m1, m2}; vector probs = {0.012519475, 0.041280228, 0.075018647, 0.081663656, 0.037152205, 0.12248971, 0.07349729, 0.08}; dfg.emplace_shared(discrete_keys, probs); diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index 1c9476eeb..71aac8224 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -104,7 +104,7 @@ TEST(HybridGaussianFactor, Keys) { } /* ************************************************************************* */ -TEST(HybridGaussianFactor, Printing) { +TEST_DISABLED(HybridGaussianFactor, Printing) { using namespace test_constructor; HybridGaussianFactor hybridFactor(m1, {f10, f11}); @@ -123,6 +123,7 @@ Hybrid [x1 x2; 1]{ ] b = [ 0 0 ] No noise model +scalar: 0 1 Leaf : A[x1] = [ @@ -135,6 +136,7 @@ Hybrid [x1 x2; 1]{ ] b = [ 0 0 ] No noise model +scalar: 0 } )"; diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 4c88fa8cd..23ad0c135 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -13,39 +13,35 @@ * @file testHybridGaussianFactorGraph.cpp * @date Mar 11, 2022 * @author Fan Jiang + * @author Varun Agrawal + * @author Frank Dellaert */ -#include -#include +#include #include #include #include #include #include #include -#include #include #include #include #include #include #include -#include #include #include -#include #include #include #include #include -#include +#include +#include + #include -#include -#include -#include #include -#include #include #include "Switching.h" @@ -54,17 +50,15 @@ using namespace std; using namespace gtsam; -using gtsam::symbol_shorthand::D; using gtsam::symbol_shorthand::M; using gtsam::symbol_shorthand::N; using gtsam::symbol_shorthand::X; -using gtsam::symbol_shorthand::Y; using gtsam::symbol_shorthand::Z; // Set up sampling std::mt19937_64 kRng(42); -static const DiscreteKey m1(M(1), 2); +static const DiscreteKey m0(M(0), 2), m1(M(1), 2), m2(M(2), 2); /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, Creation) { @@ -77,7 +71,7 @@ TEST(HybridGaussianFactorGraph, Creation) { // Define a hybrid gaussian conditional P(x0|x1, c0) // and add it to the factor graph. 
HybridGaussianConditional gm( - {M(0), 2}, + m0, {std::make_shared(X(0), Z_3x1, I_3x3, X(1), I_3x3), std::make_shared(X(0), Vector3::Ones(), I_3x3, X(1), I_3x3)}); @@ -98,22 +92,6 @@ TEST(HybridGaussianFactorGraph, EliminateSequential) { EXPECT_LONGS_EQUAL(result.first->size(), 1); } -/* ************************************************************************* */ -TEST(HybridGaussianFactorGraph, EliminateMultifrontal) { - // Test multifrontal elimination - HybridGaussianFactorGraph hfg; - - // Add priors on x0 and c1 - hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); - hfg.add(DecisionTreeFactor(m1, {2, 8})); - - Ordering ordering; - ordering.push_back(X(0)); - auto result = hfg.eliminatePartialMultifrontal(ordering); - - EXPECT_LONGS_EQUAL(result.first->size(), 1); - EXPECT_LONGS_EQUAL(result.second->size(), 1); -} /* ************************************************************************* */ namespace two { @@ -121,7 +99,7 @@ std::vector components(Key key) { return {std::make_shared(key, I_3x3, Z_3x1), std::make_shared(key, I_3x3, Vector3::Ones())}; } -} // namespace two +} // namespace two /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) { @@ -179,7 +157,7 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { // Discrete probability table for c1 hfg.add(DecisionTreeFactor(m1, {2, 8})); // Joint discrete probability table for c1, c2 - hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); + hfg.add(DecisionTreeFactor({m1, m2}, "1 2 3 4")); HybridBayesNet::shared_ptr result = hfg.eliminateSequential(); @@ -187,296 +165,8 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { EXPECT_LONGS_EQUAL(4, result->size()); } -/* ************************************************************************* */ -TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { - HybridGaussianFactorGraph hfg; - - hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); - hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - - hfg.add(HybridGaussianFactor({M(1), 2}, two::components(X(1)))); - - hfg.add(DecisionTreeFactor(m1, {2, 8})); - // TODO(Varun) Adding extra discrete variable not connected to continuous - // variable throws segfault - // hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); - - HybridBayesTree::shared_ptr result = hfg.eliminateMultifrontal(); - - // The bayes tree should have 3 cliques - EXPECT_LONGS_EQUAL(3, result->size()); - // GTSAM_PRINT(*result); - // GTSAM_PRINT(*result->marginalFactor(M(2))); -} - -/* ************************************************************************* */ -TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalCLG) { - HybridGaussianFactorGraph hfg; - - // Prior on x0 - hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); - // Factor between x0-x1 - hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - - // Hybrid factor P(x1|c1) - hfg.add(HybridGaussianFactor(m1, two::components(X(1)))); - // Prior factor on c1 - hfg.add(DecisionTreeFactor(m1, {2, 8})); - - // Get a constrained ordering keeping c1 last - auto ordering_full = HybridOrdering(hfg); - - // Returns a Hybrid Bayes Tree with distribution P(x0|x1)P(x1|c1)P(c1) - HybridBayesTree::shared_ptr hbt = hfg.eliminateMultifrontal(ordering_full); - - EXPECT_LONGS_EQUAL(3, hbt->size()); -} - -/* ************************************************************************* */ /* - * This test is about how to assemble the Bayes Tree roots after we do partial - * elimination - */ 
-TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) { - HybridGaussianFactorGraph hfg; - - hfg.add(JacobianFactor(X(0), I_3x3, X(1), -I_3x3, Z_3x1)); - hfg.add(JacobianFactor(X(1), I_3x3, X(2), -I_3x3, Z_3x1)); - - hfg.add(HybridGaussianFactor({M(0), 2}, two::components(X(0)))); - hfg.add(HybridGaussianFactor({M(1), 2}, two::components(X(2)))); - - hfg.add(DecisionTreeFactor({{M(1), 2}, {M(2), 2}}, "1 2 3 4")); - - hfg.add(JacobianFactor(X(3), I_3x3, X(4), -I_3x3, Z_3x1)); - hfg.add(JacobianFactor(X(4), I_3x3, X(5), -I_3x3, Z_3x1)); - - hfg.add(HybridGaussianFactor({M(3), 2}, two::components(X(3)))); - hfg.add(HybridGaussianFactor({M(2), 2}, two::components(X(5)))); - - auto ordering_full = - Ordering::ColamdConstrainedLast(hfg, {M(0), M(1), M(2), M(3)}); - - const auto [hbt, remaining] = hfg.eliminatePartialMultifrontal(ordering_full); - - // 9 cliques in the bayes tree and 0 remaining variables to eliminate. - EXPECT_LONGS_EQUAL(9, hbt->size()); - EXPECT_LONGS_EQUAL(0, remaining->size()); - - /* - (Fan) Explanation: the Junction tree will need to re-eliminate to get to the - marginal on X(1), which is not possible because it involves eliminating - discrete before continuous. The solution to this, however, is in Murphy02. - TLDR is that this is 1. expensive and 2. inexact. nevertheless it is doable. - And I believe that we should do this. - */ -} - -void dotPrint(const HybridGaussianFactorGraph::shared_ptr &hfg, - const HybridBayesTree::shared_ptr &hbt, - const Ordering &ordering) { - DotWriter dw; - dw.positionHints['c'] = 2; - dw.positionHints['x'] = 1; - std::cout << hfg->dot(DefaultKeyFormatter, dw); - std::cout << "\n"; - hbt->dot(std::cout); - - std::cout << "\n"; - std::cout << hfg->eliminateSequential(ordering)->dot(DefaultKeyFormatter, dw); -} - -/* ************************************************************************* */ -// TODO(fan): make a graph like Varun's paper one -TEST(HybridGaussianFactorGraph, Switching) { - auto N = 12; - auto hfg = makeSwitchingChain(N); - - // X(5) will be the center, X(1-4), X(6-9) - // X(3), X(7) - // X(2), X(8) - // X(1), X(4), X(6), X(9) - // M(5) will be the center, M(1-4), M(6-8) - // M(3), M(7) - // M(1), M(4), M(2), M(6), M(8) - // auto ordering_full = - // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), - // X(5), - // M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)}); - KeyVector ordering; - - { - std::vector naturalX(N); - std::iota(naturalX.begin(), naturalX.end(), 1); - std::vector ordX; - std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), - [](int x) { return X(x); }); - - auto [ndX, lvls] = makeBinaryOrdering(ordX); - std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); - // TODO(dellaert): this has no effect! - for (auto &l : lvls) { - l = -l; - } - } - { - std::vector naturalC(N - 1); - std::iota(naturalC.begin(), naturalC.end(), 1); - std::vector ordC; - std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), - [](int x) { return M(x); }); - - // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); - const auto [ndC, lvls] = makeBinaryOrdering(ordC); - std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); - } - auto ordering_full = Ordering(ordering); - - // GTSAM_PRINT(*hfg); - // GTSAM_PRINT(ordering_full); - - const auto [hbt, remaining] = - hfg->eliminatePartialMultifrontal(ordering_full); - - // 12 cliques in the bayes tree and 0 remaining variables to eliminate. 
- EXPECT_LONGS_EQUAL(12, hbt->size()); - EXPECT_LONGS_EQUAL(0, remaining->size()); -} - -/* ************************************************************************* */ -// TODO(fan): make a graph like Varun's paper one -TEST(HybridGaussianFactorGraph, SwitchingISAM) { - auto N = 11; - auto hfg = makeSwitchingChain(N); - - // X(5) will be the center, X(1-4), X(6-9) - // X(3), X(7) - // X(2), X(8) - // X(1), X(4), X(6), X(9) - // M(5) will be the center, M(1-4), M(6-8) - // M(3), M(7) - // M(1), M(4), M(2), M(6), M(8) - // auto ordering_full = - // Ordering(KeyVector{X(1), X(4), X(2), X(6), X(9), X(8), X(3), X(7), - // X(5), - // M(1), M(4), M(2), M(6), M(8), M(3), M(7), M(5)}); - KeyVector ordering; - - { - std::vector naturalX(N); - std::iota(naturalX.begin(), naturalX.end(), 1); - std::vector ordX; - std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), - [](int x) { return X(x); }); - - auto [ndX, lvls] = makeBinaryOrdering(ordX); - std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); - // TODO(dellaert): this has no effect! - for (auto &l : lvls) { - l = -l; - } - } - { - std::vector naturalC(N - 1); - std::iota(naturalC.begin(), naturalC.end(), 1); - std::vector ordC; - std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), - [](int x) { return M(x); }); - - // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); - const auto [ndC, lvls] = makeBinaryOrdering(ordC); - std::copy(ndC.begin(), ndC.end(), std::back_inserter(ordering)); - } - auto ordering_full = Ordering(ordering); - - const auto [hbt, remaining] = - hfg->eliminatePartialMultifrontal(ordering_full); - - auto new_fg = makeSwitchingChain(12); - auto isam = HybridGaussianISAM(*hbt); - - // Run an ISAM update. - HybridGaussianFactorGraph factorGraph; - factorGraph.push_back(new_fg->at(new_fg->size() - 2)); - factorGraph.push_back(new_fg->at(new_fg->size() - 1)); - isam.update(factorGraph); - - // ISAM should have 12 factors after the last update - EXPECT_LONGS_EQUAL(12, isam.size()); -} - -/* ************************************************************************* */ -TEST(HybridGaussianFactorGraph, SwitchingTwoVar) { - const int N = 7; - auto hfg = makeSwitchingChain(N, X); - hfg->push_back(*makeSwitchingChain(N, Y, D)); - - for (int t = 1; t <= N; t++) { - hfg->add(JacobianFactor(X(t), I_3x3, Y(t), -I_3x3, Vector3(1.0, 0.0, 0.0))); - } - - KeyVector ordering; - - KeyVector naturalX(N); - std::iota(naturalX.begin(), naturalX.end(), 1); - KeyVector ordX; - for (size_t i = 1; i <= N; i++) { - ordX.emplace_back(X(i)); - ordX.emplace_back(Y(i)); - } - - for (size_t i = 1; i <= N - 1; i++) { - ordX.emplace_back(M(i)); - } - for (size_t i = 1; i <= N - 1; i++) { - ordX.emplace_back(D(i)); - } - - { - DotWriter dw; - dw.positionHints['x'] = 1; - dw.positionHints['c'] = 0; - dw.positionHints['d'] = 3; - dw.positionHints['y'] = 2; - // std::cout << hfg->dot(DefaultKeyFormatter, dw); - // std::cout << "\n"; - } - - { - DotWriter dw; - dw.positionHints['y'] = 9; - // dw.positionHints['c'] = 0; - // dw.positionHints['d'] = 3; - dw.positionHints['x'] = 1; - // std::cout << "\n"; - // std::cout << hfg->eliminateSequential(Ordering(ordX)) - // ->dot(DefaultKeyFormatter, dw); - // hfg->eliminateMultifrontal(Ordering(ordX))->dot(std::cout); - } - - Ordering ordering_partial; - for (size_t i = 1; i <= N; i++) { - ordering_partial.emplace_back(X(i)); - ordering_partial.emplace_back(Y(i)); - } - const auto [hbn, remaining] = - hfg->eliminatePartialSequential(ordering_partial); - 
- EXPECT_LONGS_EQUAL(14, hbn->size());
- EXPECT_LONGS_EQUAL(11, remaining->size());
-
- {
-   DotWriter dw;
-   dw.positionHints['x'] = 1;
-   dw.positionHints['c'] = 0;
-   dw.positionHints['d'] = 3;
-   dw.positionHints['y'] = 2;
-   // std::cout << remaining->dot(DefaultKeyFormatter, dw);
-   // std::cout << "\n";
- }
-}
-
-/* ****************************************************************************/
+****************************************************************************/
 // Select a particular continuous factor graph given a discrete assignment
 TEST(HybridGaussianFactorGraph, DiscreteSelection) {
   Switching s(3);
@@ -547,23 +237,43 @@ TEST(HybridGaussianFactorGraph, optimize) {
 // Test adding of gaussian conditional and re-elimination.
 TEST(HybridGaussianFactorGraph, Conditionals) {
   Switching switching(4);
-  HybridGaussianFactorGraph hfg;
-  hfg.push_back(switching.linearizedFactorGraph.at(0));  // P(X1)
+  HybridGaussianFactorGraph hfg;
+  hfg.push_back(switching.linearizedFactorGraph.at(0));  // P(X0)
 
   Ordering ordering;
   ordering.push_back(X(0));
   HybridBayesNet::shared_ptr bayes_net = hfg.eliminateSequential(ordering);
 
-  hfg.push_back(switching.linearizedFactorGraph.at(1));  // P(X1, X2 | M1)
-  hfg.push_back(*bayes_net);
-  hfg.push_back(switching.linearizedFactorGraph.at(2));  // P(X2, X3 | M2)
-  hfg.push_back(switching.linearizedFactorGraph.at(5));  // P(M1)
-  ordering.push_back(X(1));
-  ordering.push_back(X(2));
-  ordering.push_back(M(0));
-  ordering.push_back(M(1));
+  HybridGaussianFactorGraph hfg2;
+  hfg2.push_back(*bayes_net);                             // P(X0)
+  hfg2.push_back(switching.linearizedFactorGraph.at(1));  // P(X0, X1 | M0)
+  hfg2.push_back(switching.linearizedFactorGraph.at(2));  // P(X1, X2 | M1)
+  hfg2.push_back(switching.linearizedFactorGraph.at(5));  // P(M1)
+  ordering += X(1), X(2), M(0), M(1);
 
-  bayes_net = hfg.eliminateSequential(ordering);
+  // Create the product of the first two factors and check elimination:
+  HybridGaussianFactorGraph fragment;
+  fragment.push_back(hfg2[0]);
+  fragment.push_back(hfg2[1]);
+
+  // Check the product: for M(0)=0 both factors end up in the selected graph.
+  HybridGaussianProductFactor product = fragment.collectProductFactor();
+  auto leaf = fragment(DiscreteValues{{M(0), 0}});
+  EXPECT_LONGS_EQUAL(2, leaf.size());
+
+  // Check the product, and that removeEmpty does not touch it
+  auto pruned = product.removeEmpty();
+  LONGS_EQUAL(2, pruned.nrLeaves());
+
+  // Test eliminate
+  auto [hybridConditional, factor] = fragment.eliminate({X(0)});
+  EXPECT(hybridConditional->isHybrid());
+  EXPECT(hybridConditional->keys() == KeyVector({X(0), X(1), M(0)}));
+
+  EXPECT(dynamic_pointer_cast<HybridGaussianFactor>(factor));
+  EXPECT(factor->keys() == KeyVector({X(1), M(0)}));
+
+  bayes_net = hfg2.eliminateSequential(ordering);
 
   HybridValues result = bayes_net->optimize();
@@ -647,7 +357,7 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) {
   HybridValues delta = hybridBayesNet->optimize();
   auto error_tree = graph.errorTree(delta.continuous());
 
-  std::vector<DiscreteKey> discrete_keys = {{M(0), 2}, {M(1), 2}};
+  std::vector<DiscreteKey> discrete_keys = {m0, m1};
   std::vector<double> leaves = {2.7916153, 1.5888555, 1.7233422, 1.6191947};
   AlgebraicDecisionTree<Key> expected_error(discrete_keys, leaves);
@@ -712,11 +422,12 @@ TEST(HybridGaussianFactorGraph, collectProductFactor) {
 
 /* ****************************************************************************/
 // Check that the factor graph unnormalized probability is proportional to the
-// Bayes net probability for the given measurements.
-bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
-               const HybridGaussianFactorGraph &fg, size_t num_samples = 100) {
+// Bayes net probability for the given measurements.
+bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
+               const HybridGaussianFactorGraph &fg, size_t num_samples = 100) {
   auto compute_ratio = [&](HybridValues *sample) -> double {
-    sample->update(measurements);  // update sample with given measurements:
+    sample->update(measurements);  // update sample with given measurements:
     return bn.evaluate(*sample) / fg.probPrime(*sample);
   };
 
@@ -726,7 +437,8 @@ bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
   // Test ratios for a number of independent samples:
   for (size_t i = 0; i < num_samples; i++) {
     HybridValues sample = bn.sample(&kRng);
-    if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false;
+    if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6)
+      return false;
   }
   return true;
 }
@@ -737,7 +449,7 @@ bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
 bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
                const HybridBayesNet &posterior, size_t num_samples = 100) {
   auto compute_ratio = [&](HybridValues *sample) -> double {
-    sample->update(measurements);  // update sample with given measurements:
+    sample->update(measurements);  // update sample with given measurements:
     return bn.evaluate(*sample) / posterior.evaluate(*sample);
   };
 
@@ -749,7 +461,8 @@ bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
     HybridValues sample = bn.sample(&kRng);
     // GTSAM_PRINT(sample);
     // std::cout << "ratio: " << compute_ratio(&sample) << std::endl;
-    if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false;
+    if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6)
+      return false;
   }
   return true;
 }
diff --git a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
index bd830794a..6d650246e 100644
--- a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
@@ -34,7 +34,6 @@ using namespace std;
 using namespace gtsam;
 using symbol_shorthand::M;
 using symbol_shorthand::X;
-using symbol_shorthand::Z;
 
 /* ************************************************************************* */
 namespace examples {
diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
index 3a26f4486..ad3b2afff 100644
--- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
+++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
@@ -34,6 +34,7 @@
 #include
 
 #include "Switching.h"
+#include "Test.h"  // Include for test suite
 
 #include
 
@@ -498,7 +499,7 @@ TEST(HybridNonlinearFactorGraph, Full_Elimination) {
 /****************************************************************************
 * Test printing
 */
-TEST(HybridNonlinearFactorGraph, Printing) {
+TEST_DISABLED(HybridNonlinearFactorGraph, Printing) {
   Switching self(3);
 
   auto linearizedFactorGraph = self.linearizedFactorGraph;
@@ -514,78 +515,98 @@ TEST(HybridNonlinearFactorGraph, Printing) {
 #ifdef GTSAM_DT_MERGING
 string expected_hybridFactorGraph = R"(
 size: 7
-factor 0:
+Factor 0
+GaussianFactor:
+
   A[x0] = [
-	10
+  10
 ]
   b = [ -10 ]
 No noise model
-factor 1:
-HybridGaussianFactor
+
+Factor 1
+HybridGaussianFactor:
 Hybrid [x0 x1; m0]{
 Choice(m0)
 0 Leaf :
   A[x0] = [
-	-1
+  -1
 ]
   A[x1] = [
-	1
+  1
 ]
   b = [ -1 ]
No noise model +scalar: 0 1 Leaf : A[x0] = [ - -1 + -1 ] A[x1] = [ - 1 + 1 ] b = [ -0 ] No noise model +scalar: 0 } -factor 2: -HybridGaussianFactor + +Factor 2 +HybridGaussianFactor: Hybrid [x1 x2; m1]{ Choice(m1) 0 Leaf : A[x1] = [ - -1 + -1 ] A[x2] = [ - 1 + 1 ] b = [ -1 ] No noise model +scalar: 0 1 Leaf : A[x1] = [ - -1 + -1 ] A[x2] = [ - 1 + 1 ] b = [ -0 ] No noise model +scalar: 0 } -factor 3: + +Factor 3 +GaussianFactor: + A[x1] = [ - 10 + 10 ] b = [ -10 ] No noise model -factor 4: + +Factor 4 +GaussianFactor: + A[x2] = [ - 10 + 10 ] b = [ -10 ] No noise model -factor 5: P( m0 ): + +Factor 5 +DiscreteFactor: + P( m0 ): Leaf 0.5 -factor 6: P( m1 | m0 ): + +Factor 6 +DiscreteFactor: + P( m1 | m0 ): Choice(m1) 0 Choice(m0) 0 0 Leaf 0.33333333 @@ -594,6 +615,7 @@ factor 6: P( m1 | m0 ): 1 0 Leaf 0.66666667 1 1 Leaf 0.4 + )"; #else string expected_hybridFactorGraph = R"( @@ -686,7 +708,7 @@ factor 6: P( m1 | m0 ): // Expected output for hybridBayesNet. string expected_hybridBayesNet = R"( size: 3 -conditional 0: Hybrid P( x0 | x1 m0) +conditional 0: P( x0 | x1 m0) Discrete Keys = (m0, 2), logNormalizationConstant: 1.38862 @@ -705,7 +727,7 @@ conditional 0: Hybrid P( x0 | x1 m0) logNormalizationConstant: 1.38862 No noise model -conditional 1: Hybrid P( x1 | x2 m0 m1) +conditional 1: P( x1 | x2 m0 m1) Discrete Keys = (m0, 2), (m1, 2), logNormalizationConstant: 1.3935 @@ -740,7 +762,7 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) logNormalizationConstant: 1.3935 No noise model -conditional 2: Hybrid P( x2 | m0 m1) +conditional 2: P( x2 | m0 m1) Discrete Keys = (m0, 2), (m1, 2), logNormalizationConstant: 1.38857 From 584a71fb944b0536b6497caecfa5d2236b5d3e2c Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 6 Oct 2024 17:50:22 +0900 Subject: [PATCH 046/204] Product now has scalars --- gtsam/hybrid/HybridGaussianConditional.cpp | 159 +++++++++--------- gtsam/hybrid/HybridGaussianFactor.cpp | 117 ++++++------- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 156 ++++++++--------- gtsam/hybrid/HybridGaussianProductFactor.cpp | 54 +++--- gtsam/hybrid/HybridGaussianProductFactor.h | 20 ++- .../tests/testHybridGaussianFactorGraph.cpp | 119 +++++++------ .../tests/testHybridGaussianProductFactor.cpp | 71 ++++---- 7 files changed, 343 insertions(+), 353 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index a42485f10..a6dcc6624 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -22,8 +22,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -44,7 +44,7 @@ struct HybridGaussianConditional::Helper { /// Construct from a vector of mean and sigma pairs, plus extra args. template - explicit Helper(const DiscreteKey &mode, const P &p, Args &&...args) { + explicit Helper(const DiscreteKey& mode, const P& p, Args&&... 
args) { nrFrontals = 1; minNegLogConstant = std::numeric_limits::infinity(); @@ -52,9 +52,8 @@ struct HybridGaussianConditional::Helper { std::vector gcs; fvs.reserve(p.size()); gcs.reserve(p.size()); - for (auto &&[mean, sigma] : p) { - auto gaussianConditional = - GC::sharedMeanAndStddev(std::forward(args)..., mean, sigma); + for (auto&& [mean, sigma] : p) { + auto gaussianConditional = GC::sharedMeanAndStddev(std::forward(args)..., mean, sigma); double value = gaussianConditional->negLogConstant(); minNegLogConstant = std::min(minNegLogConstant, value); fvs.emplace_back(gaussianConditional, value); @@ -66,10 +65,9 @@ struct HybridGaussianConditional::Helper { } /// Construct from tree of GaussianConditionals. - explicit Helper(const Conditionals &conditionals) - : conditionals(conditionals), - minNegLogConstant(std::numeric_limits::infinity()) { - auto func = [this](const GC::shared_ptr &c) -> GaussianFactorValuePair { + explicit Helper(const Conditionals& conditionals) + : conditionals(conditionals), minNegLogConstant(std::numeric_limits::infinity()) { + auto func = [this](const GC::shared_ptr& c) -> GaussianFactorValuePair { double value = 0.0; if (c) { if (!nrFrontals.has_value()) { @@ -90,56 +88,61 @@ struct HybridGaussianConditional::Helper { }; /* *******************************************************************************/ -HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKeys &discreteParents, const Helper &helper) +HybridGaussianConditional::HybridGaussianConditional(const DiscreteKeys& discreteParents, + const Helper& helper) : BaseFactor(discreteParents, helper.pairs), BaseConditional(*helper.nrFrontals), conditionals_(helper.conditionals), negLogConstant_(helper.minNegLogConstant) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey &discreteParent, - const std::vector &conditionals) + const DiscreteKey& discreteParent, + const std::vector& conditionals) : HybridGaussianConditional(DiscreteKeys{discreteParent}, Conditionals({discreteParent}, conditionals)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey &discreteParent, Key key, // - const std::vector> ¶meters) + const DiscreteKey& discreteParent, + Key key, // + const std::vector>& parameters) : HybridGaussianConditional(DiscreteKeys{discreteParent}, Helper(discreteParent, parameters, key)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey &discreteParent, Key key, // - const Matrix &A, Key parent, - const std::vector> ¶meters) - : HybridGaussianConditional( - DiscreteKeys{discreteParent}, - Helper(discreteParent, parameters, key, A, parent)) {} + const DiscreteKey& discreteParent, + Key key, // + const Matrix& A, + Key parent, + const std::vector>& parameters) + : HybridGaussianConditional(DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key, A, parent)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey &discreteParent, Key key, // - const Matrix &A1, Key parent1, const Matrix &A2, Key parent2, - const std::vector> ¶meters) - : HybridGaussianConditional( - DiscreteKeys{discreteParent}, - Helper(discreteParent, parameters, key, A1, parent1, A2, parent2)) {} + const DiscreteKey& discreteParent, + Key key, // + const Matrix& A1, + Key parent1, + const Matrix& A2, + Key parent2, + const std::vector>& parameters) + : HybridGaussianConditional(DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key, A1, parent1, A2, parent2)) { +} 
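// (Editorial sketch, hypothetical values, not part of the patch.) Each of the
// mean/sigma constructors above expands into one GaussianConditional per mode;
// e.g. the single-parent variant for a binary mode m0:
//   HybridGaussianConditional hgc(m0, X(0), I_1x1, X(1),
//                                 {{Vector1(0.0), 0.5}, {Vector1(2.0), 3.0}});
// builds P(x0 | x1, m0) whose mean offset and stddev switch with m0.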
HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKeys &discreteParents, - const HybridGaussianConditional::Conditionals &conditionals) + const DiscreteKeys& discreteParents, + const HybridGaussianConditional::Conditionals& conditionals) : HybridGaussianConditional(discreteParents, Helper(conditionals)) {} /* *******************************************************************************/ -const HybridGaussianConditional::Conditionals & -HybridGaussianConditional::conditionals() const { +const HybridGaussianConditional::Conditionals& HybridGaussianConditional::conditionals() const { return conditionals_; } /* *******************************************************************************/ -HybridGaussianProductFactor HybridGaussianConditional::asProductFactor() - const { - auto wrap = [this](const std::shared_ptr &gc) { +HybridGaussianProductFactor HybridGaussianConditional::asProductFactor() const { + auto wrap = [this](const std::shared_ptr& gc) + -> std::pair { // First check if conditional has not been pruned if (gc) { const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_; @@ -151,10 +154,17 @@ HybridGaussianProductFactor HybridGaussianConditional::asProductFactor() Vector c(1); c << std::sqrt(2.0 * Cgm_Kgcm); auto constantFactor = std::make_shared(c); - return GaussianFactorGraph{gc, constantFactor}; + return {GaussianFactorGraph{gc, constantFactor}, Cgm_Kgcm}; + } else { + // The scalar can be zero. + // TODO(Frank): after hiding is gone, this should be only case here. + return {GaussianFactorGraph{gc}, Cgm_Kgcm}; } + } else { + // If the conditional is pruned, return an empty GaussianFactorGraph with zero scalar sum + // TODO(Frank): Could we just return an *empty* GaussianFactorGraph? + return {GaussianFactorGraph{nullptr}, 0.0}; } - return GaussianFactorGraph{gc}; }; return {{conditionals_, wrap}}; } @@ -162,7 +172,7 @@ HybridGaussianProductFactor HybridGaussianConditional::asProductFactor() /* *******************************************************************************/ size_t HybridGaussianConditional::nrComponents() const { size_t total = 0; - conditionals_.visit([&total](const GaussianFactor::shared_ptr &node) { + conditionals_.visit([&total](const GaussianFactor::shared_ptr& node) { if (node) total += 1; }); return total; @@ -170,21 +180,19 @@ size_t HybridGaussianConditional::nrComponents() const { /* *******************************************************************************/ GaussianConditional::shared_ptr HybridGaussianConditional::choose( - const DiscreteValues &discreteValues) const { - auto &ptr = conditionals_(discreteValues); + const DiscreteValues& discreteValues) const { + auto& ptr = conditionals_(discreteValues); if (!ptr) return nullptr; auto conditional = std::dynamic_pointer_cast(ptr); if (conditional) return conditional; else - throw std::logic_error( - "A HybridGaussianConditional unexpectedly contained a non-conditional"); + throw std::logic_error("A HybridGaussianConditional unexpectedly contained a non-conditional"); } /* *******************************************************************************/ -bool HybridGaussianConditional::equals(const HybridFactor &lf, - double tol) const { - const This *e = dynamic_cast(&lf); +bool HybridGaussianConditional::equals(const HybridFactor& lf, double tol) const { + const This* e = dynamic_cast(&lf); if (e == nullptr) return false; // This will return false if either conditionals_ is empty or e->conditionals_ @@ -193,27 +201,26 @@ bool 
HybridGaussianConditional::equals(const HybridFactor &lf, // Check the base and the factors: return BaseFactor::equals(*e, tol) && - conditionals_.equals( - e->conditionals_, [tol](const auto &f1, const auto &f2) { - return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); - }); + conditionals_.equals(e->conditionals_, [tol](const auto& f1, const auto& f2) { + return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); + }); } /* *******************************************************************************/ -void HybridGaussianConditional::print(const std::string &s, - const KeyFormatter &formatter) const { +void HybridGaussianConditional::print(const std::string& s, const KeyFormatter& formatter) const { std::cout << (s.empty() ? "" : s + "\n"); BaseConditional::print("", formatter); std::cout << " Discrete Keys = "; - for (auto &dk : discreteKeys()) { + for (auto& dk : discreteKeys()) { std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), "; } std::cout << std::endl << " logNormalizationConstant: " << -negLogConstant() << std::endl << std::endl; conditionals_.print( - "", [&](Key k) { return formatter(k); }, - [&](const GaussianConditional::shared_ptr &gf) -> std::string { + "", + [&](Key k) { return formatter(k); }, + [&](const GaussianConditional::shared_ptr& gf) -> std::string { RedirectCout rd; if (gf && !gf->empty()) { gf->print("", formatter); @@ -230,20 +237,19 @@ KeyVector HybridGaussianConditional::continuousParents() const { const auto range = parents(); KeyVector continuousParentKeys(range.begin(), range.end()); // Loop over all discrete keys: - for (const auto &discreteKey : discreteKeys()) { + for (const auto& discreteKey : discreteKeys()) { const Key key = discreteKey.first; // remove that key from continuousParentKeys: - continuousParentKeys.erase(std::remove(continuousParentKeys.begin(), - continuousParentKeys.end(), key), - continuousParentKeys.end()); + continuousParentKeys.erase( + std::remove(continuousParentKeys.begin(), continuousParentKeys.end(), key), + continuousParentKeys.end()); } return continuousParentKeys; } /* ************************************************************************* */ -bool HybridGaussianConditional::allFrontalsGiven( - const VectorValues &given) const { - for (auto &&kv : given) { +bool HybridGaussianConditional::allFrontalsGiven(const VectorValues& given) const { + for (auto&& kv : given) { if (given.find(kv.first) == given.end()) { return false; } @@ -253,7 +259,7 @@ bool HybridGaussianConditional::allFrontalsGiven( /* ************************************************************************* */ std::shared_ptr HybridGaussianConditional::likelihood( - const VectorValues &given) const { + const VectorValues& given) const { if (!allFrontalsGiven(given)) { throw std::runtime_error( "HybridGaussianConditional::likelihood: given values are missing some " @@ -264,8 +270,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( const KeyVector continuousParentKeys = continuousParents(); const HybridGaussianFactor::FactorValuePairs likelihoods( conditionals_, - [&](const GaussianConditional::shared_ptr &conditional) - -> GaussianFactorValuePair { + [&](const GaussianConditional::shared_ptr& conditional) -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_; if (Cgm_Kgcm == 0.0) { @@ -276,26 +281,24 @@ std::shared_ptr HybridGaussianConditional::likelihood( return {likelihood_m, Cgm_Kgcm}; } }); - return 
std::make_shared(discreteParentKeys, - likelihoods); + return std::make_shared(discreteParentKeys, likelihoods); } /* ************************************************************************* */ -std::set DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) { +std::set DiscreteKeysAsSet(const DiscreteKeys& discreteKeys) { std::set s(discreteKeys.begin(), discreteKeys.end()); return s; } /* *******************************************************************************/ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( - const DecisionTreeFactor &discreteProbs) const { + const DecisionTreeFactor& discreteProbs) const { // Find keys in discreteProbs.keys() but not in this->keys(): std::set mine(this->keys().begin(), this->keys().end()); - std::set theirs(discreteProbs.keys().begin(), - discreteProbs.keys().end()); + std::set theirs(discreteProbs.keys().begin(), discreteProbs.keys().end()); std::vector diff; - std::set_difference(theirs.begin(), theirs.end(), mine.begin(), mine.end(), - std::back_inserter(diff)); + std::set_difference( + theirs.begin(), theirs.end(), mine.begin(), mine.end(), std::back_inserter(diff)); // Find maximum probability value for every combination of our keys. Ordering keys(diff); @@ -303,26 +306,24 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( // Check the max value for every combination of our keys. // If the max value is 0.0, we can prune the corresponding conditional. - auto pruner = [&](const Assignment &choices, - const GaussianConditional::shared_ptr &conditional) - -> GaussianConditional::shared_ptr { + auto pruner = + [&](const Assignment& choices, + const GaussianConditional::shared_ptr& conditional) -> GaussianConditional::shared_ptr { return (max->evaluate(choices) == 0.0) ? nullptr : conditional; }; auto pruned_conditionals = conditionals_.apply(pruner); - return std::make_shared(discreteKeys(), - pruned_conditionals); + return std::make_shared(discreteKeys(), pruned_conditionals); } /* *******************************************************************************/ -double HybridGaussianConditional::logProbability( - const HybridValues &values) const { +double HybridGaussianConditional::logProbability(const HybridValues& values) const { auto conditional = conditionals_(values.discrete()); return conditional->logProbability(values.continuous()); } /* *******************************************************************************/ -double HybridGaussianConditional::evaluate(const HybridValues &values) const { +double HybridGaussianConditional::evaluate(const HybridValues& values) const { auto conditional = conditionals_(values.discrete()); return conditional->evaluate(values.continuous()); } diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index acdd9ef89..d49630b64 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -32,8 +32,8 @@ namespace gtsam { /* *******************************************************************************/ -HybridGaussianFactor::FactorValuePairs -HybridGaussianFactor::augment(const FactorValuePairs &factors) { +HybridGaussianFactor::FactorValuePairs HybridGaussianFactor::augment( + const FactorValuePairs& factors) { // Find the minimum value so we can "proselytize" to positive values. // Done because we can't have sqrt of negative numbers. 
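  // (Editorial note) The update below folds each leaf's scalar into the
  // least-squares objective by appending an extra rhs row
  // c = sqrt(2 * (value - min_value)), so the factor's 0.5*||c||^2 error term
  // picks up exactly the normalized scalar.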
DecisionTree gaussianFactors; @@ -44,18 +44,16 @@ HybridGaussianFactor::augment(const FactorValuePairs &factors) { double min_value = valueTree.min(); // Finally, update the [A|b] matrices. - auto update = [&min_value](const auto &gfv) -> GaussianFactorValuePair { + auto update = [&min_value](const auto& gfv) -> GaussianFactorValuePair { auto [gf, value] = gfv; auto jf = std::dynamic_pointer_cast(gf); - if (!jf) - return {gf, 0.0}; // should this be zero or infinite? + if (!jf) return {gf, 0.0}; // should this be zero or infinite? double normalized_value = value - min_value; // If the value is 0, do nothing - if (normalized_value == 0.0) - return {gf, 0.0}; + if (normalized_value == 0.0) return {gf, value}; GaussianFactorGraph gfg; gfg.push_back(jf); @@ -66,40 +64,42 @@ HybridGaussianFactor::augment(const FactorValuePairs &factors) { auto constantFactor = std::make_shared(c); gfg.push_back(constantFactor); - return {std::make_shared(gfg), normalized_value}; + // NOTE(Frank): we store the actual value, not the normalized value: + return {std::make_shared(gfg), value}; }; return FactorValuePairs(factors, update); } /* *******************************************************************************/ struct HybridGaussianFactor::ConstructorHelper { - KeyVector continuousKeys; // Continuous keys extracted from factors - DiscreteKeys discreteKeys; // Discrete keys provided to the constructors - FactorValuePairs pairs; // The decision tree with factors and scalars + KeyVector continuousKeys; // Continuous keys extracted from factors + DiscreteKeys discreteKeys; // Discrete keys provided to the constructors + FactorValuePairs pairs; // The decision tree with factors and scalars - ConstructorHelper(const DiscreteKey &discreteKey, - const std::vector &factors) + /// Constructor for a single discrete key and a vector of Gaussian factors + ConstructorHelper(const DiscreteKey& discreteKey, + const std::vector& factors) : discreteKeys({discreteKey}) { // Extract continuous keys from the first non-null factor - for (const auto &factor : factors) { + for (const auto& factor : factors) { if (factor && continuousKeys.empty()) { continuousKeys = factor->keys(); break; } } // Build the FactorValuePairs DecisionTree - pairs = FactorValuePairs( - DecisionTree(discreteKeys, factors), - [](const auto &f) { - return std::pair{f, 0.0}; - }); + pairs = FactorValuePairs(DecisionTree(discreteKeys, factors), + [](const auto& f) { + return std::pair{f, 0.0}; + }); } - ConstructorHelper(const DiscreteKey &discreteKey, - const std::vector &factorPairs) + /// Constructor for a single discrete key and a vector of GaussianFactorValuePairs + ConstructorHelper(const DiscreteKey& discreteKey, + const std::vector& factorPairs) : discreteKeys({discreteKey}) { // Extract continuous keys from the first non-null factor - for (const auto &pair : factorPairs) { + for (const auto& pair : factorPairs) { if (pair.first && continuousKeys.empty()) { continuousKeys = pair.first->keys(); break; @@ -110,12 +110,12 @@ struct HybridGaussianFactor::ConstructorHelper { pairs = FactorValuePairs(discreteKeys, factorPairs); } - ConstructorHelper(const DiscreteKeys &discreteKeys, - const FactorValuePairs &factorPairs) + /// Constructor for a vector of discrete keys and a vector of GaussianFactorValuePairs + ConstructorHelper(const DiscreteKeys& discreteKeys, const FactorValuePairs& factorPairs) : discreteKeys(discreteKeys) { // Extract continuous keys from the first non-null factor // TODO: just stop after first non-null factor - 
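    // (Editorial note) DecisionTree::visit has no early exit, so this walks
    // every leaf even though only the first non-null factor is needed --
    // exactly what the TODO above points out.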
factorPairs.visit([&](const GaussianFactorValuePair &pair) { + factorPairs.visit([&](const GaussianFactorValuePair& pair) { if (pair.first && continuousKeys.empty()) { continuousKeys = pair.first->keys(); } @@ -127,40 +127,32 @@ struct HybridGaussianFactor::ConstructorHelper { }; /* *******************************************************************************/ -HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper &helper) - : Base(helper.continuousKeys, helper.discreteKeys), - factors_(augment(helper.pairs)) {} +HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper& helper) + : Base(helper.continuousKeys, helper.discreteKeys), factors_(augment(helper.pairs)) {} -/* *******************************************************************************/ HybridGaussianFactor::HybridGaussianFactor( - const DiscreteKey &discreteKey, - const std::vector &factorPairs) + const DiscreteKey& discreteKey, const std::vector& factorPairs) : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {} -/* *******************************************************************************/ -HybridGaussianFactor::HybridGaussianFactor( - const DiscreteKey &discreteKey, - const std::vector &factorPairs) +HybridGaussianFactor::HybridGaussianFactor(const DiscreteKey& discreteKey, + const std::vector& factorPairs) : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {} -/* *******************************************************************************/ -HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys &discreteKeys, - const FactorValuePairs &factorPairs) +HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys& discreteKeys, + const FactorValuePairs& factorPairs) : HybridGaussianFactor(ConstructorHelper(discreteKeys, factorPairs)) {} /* *******************************************************************************/ -bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { - const This *e = dynamic_cast(&lf); - if (e == nullptr) - return false; +bool HybridGaussianFactor::equals(const HybridFactor& lf, double tol) const { + const This* e = dynamic_cast(&lf); + if (e == nullptr) return false; // This will return false if either factors_ is empty or e->factors_ is // empty, but not if both are empty or both are not empty: - if (factors_.empty() ^ e->factors_.empty()) - return false; + if (factors_.empty() ^ e->factors_.empty()) return false; // Check the base and the factors: - auto compareFunc = [tol](const auto &pair1, const auto &pair2) { + auto compareFunc = [tol](const auto& pair1, const auto& pair2) { auto f1 = pair1.first, f2 = pair2.first; bool match = (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); return match && gtsam::equal(pair1.second, pair2.second, tol); @@ -169,8 +161,7 @@ bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { } /* *******************************************************************************/ -void HybridGaussianFactor::print(const std::string &s, - const KeyFormatter &formatter) const { +void HybridGaussianFactor::print(const std::string& s, const KeyFormatter& formatter) const { std::cout << (s.empty() ? 
"" : s + "\n"); HybridFactor::print("", formatter); std::cout << "{\n"; @@ -178,8 +169,9 @@ void HybridGaussianFactor::print(const std::string &s, std::cout << " empty" << std::endl; } else { factors_.print( - "", [&](Key k) { return formatter(k); }, - [&](const auto &pair) -> std::string { + "", + [&](Key k) { return formatter(k); }, + [&](const auto& pair) -> std::string { RedirectCout rd; std::cout << ":\n"; if (pair.first) { @@ -195,22 +187,25 @@ void HybridGaussianFactor::print(const std::string &s, } /* *******************************************************************************/ -HybridGaussianFactor::sharedFactor -HybridGaussianFactor::operator()(const DiscreteValues &assignment) const { +HybridGaussianFactor::sharedFactor HybridGaussianFactor::operator()( + const DiscreteValues& assignment) const { return factors_(assignment).first; } /* *******************************************************************************/ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { - return {{factors_, - [](const auto &pair) { return GaussianFactorGraph{pair.first}; }}}; + // Implemented by creating a new DecisionTree where: + // - The structure (keys and assignments) is preserved from factors_ + // - Each leaf converted to a GaussianFactorGraph with just the factor and its scalar. + return {{factors_, [](const auto& pair) -> std::pair { + return {GaussianFactorGraph{pair.first}, pair.second}; + }}}; } /* *******************************************************************************/ /// Helper method to compute the error of a component. -static double -PotentiallyPrunedComponentError(const GaussianFactor::shared_ptr &gf, - const VectorValues &values) { +static double PotentiallyPrunedComponentError(const GaussianFactor::shared_ptr& gf, + const VectorValues& values) { // Check if valid pointer if (gf) { return gf->error(values); @@ -222,10 +217,10 @@ PotentiallyPrunedComponentError(const GaussianFactor::shared_ptr &gf, } /* *******************************************************************************/ -AlgebraicDecisionTree -HybridGaussianFactor::errorTree(const VectorValues &continuousValues) const { +AlgebraicDecisionTree HybridGaussianFactor::errorTree( + const VectorValues& continuousValues) const { // functor to convert from sharedFactor to double error value. - auto errorFunc = [&continuousValues](const auto &pair) { + auto errorFunc = [&continuousValues](const auto& pair) { return PotentiallyPrunedComponentError(pair.first, continuousValues); }; DecisionTree error_tree(factors_, errorFunc); @@ -233,10 +228,10 @@ HybridGaussianFactor::errorTree(const VectorValues &continuousValues) const { } /* *******************************************************************************/ -double HybridGaussianFactor::error(const HybridValues &values) const { +double HybridGaussianFactor::error(const HybridValues& values) const { // Directly index to get the component, no need to build the whole tree. 
const auto pair = factors_(values.discrete()); return PotentiallyPrunedComponentError(pair.first, values.continuous()); } -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 5d2b4c203..b94905652 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -18,7 +18,6 @@ * @date Mar 11, 2022 */ -#include "gtsam/discrete/DiscreteValues.h" #include #include #include @@ -40,6 +39,7 @@ #include #include #include +#include "gtsam/discrete/DiscreteValues.h" #include #include @@ -59,15 +59,14 @@ using std::dynamic_pointer_cast; /* ************************************************************************ */ // Throw a runtime exception for method specified in string s, and factor f: -static void throwRuntimeError(const std::string &s, - const std::shared_ptr &f) { - auto &fr = *f; - throw std::runtime_error(s + " not implemented for factor type " + - demangle(typeid(fr).name()) + "."); +static void throwRuntimeError(const std::string& s, const std::shared_ptr& f) { + auto& fr = *f; + throw std::runtime_error(s + " not implemented for factor type " + demangle(typeid(fr).name()) + + "."); } /* ************************************************************************ */ -const Ordering HybridOrdering(const HybridGaussianFactorGraph &graph) { +const Ordering HybridOrdering(const HybridGaussianFactorGraph& graph) { KeySet discrete_keys = graph.discreteKeySet(); const VariableIndex index(graph); return Ordering::ColamdConstrainedLast( @@ -75,15 +74,14 @@ const Ordering HybridOrdering(const HybridGaussianFactorGraph &graph) { } /* ************************************************************************ */ -static void printFactor(const std::shared_ptr &factor, - const DiscreteValues &assignment, - const KeyFormatter &keyFormatter) { +static void printFactor(const std::shared_ptr& factor, + const DiscreteValues& assignment, + const KeyFormatter& keyFormatter) { if (auto hgf = std::dynamic_pointer_cast(factor)) { if (assignment.empty()) hgf->print("HybridGaussianFactor:", keyFormatter); else - hgf->operator()(assignment) - ->print("HybridGaussianFactor, component:", keyFormatter); + hgf->operator()(assignment)->print("HybridGaussianFactor, component:", keyFormatter); } else if (auto gf = std::dynamic_pointer_cast(factor)) { factor->print("GaussianFactor:\n", keyFormatter); @@ -98,9 +96,7 @@ static void printFactor(const std::shared_ptr &factor, if (assignment.empty()) hc->print("HybridConditional:", keyFormatter); else - hc->asHybrid() - ->choose(assignment) - ->print("HybridConditional, component:\n", keyFormatter); + hc->asHybrid()->choose(assignment)->print("HybridConditional, component:\n", keyFormatter); } } else { factor->print("Unknown factor type\n", keyFormatter); @@ -108,13 +104,13 @@ static void printFactor(const std::shared_ptr &factor, } /* ************************************************************************ */ -void HybridGaussianFactorGraph::print(const std::string &s, - const KeyFormatter &keyFormatter) const { +void HybridGaussianFactorGraph::print(const std::string& s, + const KeyFormatter& keyFormatter) const { std::cout << (s.empty() ? 
"" : s + " ") << std::endl; std::cout << "size: " << size() << std::endl; for (size_t i = 0; i < factors_.size(); i++) { - auto &&factor = factors_[i]; + auto&& factor = factors_[i]; if (factor == nullptr) { std::cout << "Factor " << i << ": nullptr\n"; continue; @@ -129,15 +125,15 @@ void HybridGaussianFactorGraph::print(const std::string &s, /* ************************************************************************ */ void HybridGaussianFactorGraph::printErrors( - const HybridValues &values, const std::string &str, - const KeyFormatter &keyFormatter, - const std::function - &printCondition) const { + const HybridValues& values, + const std::string& str, + const KeyFormatter& keyFormatter, + const std::function& + printCondition) const { std::cout << str << " size: " << size() << std::endl << std::endl; for (size_t i = 0; i < factors_.size(); i++) { - auto &&factor = factors_[i]; + auto&& factor = factors_[i]; if (factor == nullptr) { std::cout << "Factor " << i << ": nullptr\n"; continue; @@ -157,14 +153,13 @@ void HybridGaussianFactorGraph::printErrors( /* ************************************************************************ */ // TODO(dellaert): it's probably more efficient to first collect the discrete // keys, and then loop over all assignments to populate a vector. -HybridGaussianProductFactor -HybridGaussianFactorGraph::collectProductFactor() const { +HybridGaussianProductFactor HybridGaussianFactorGraph::collectProductFactor() const { HybridGaussianProductFactor result; - for (auto &f : factors_) { + for (auto& f : factors_) { // TODO(dellaert): can we make this cleaner and less error-prone? if (auto orphan = dynamic_pointer_cast(f)) { - continue; // Ignore OrphanWrapper + continue; // Ignore OrphanWrapper } else if (auto gf = dynamic_pointer_cast(f)) { result += gf; } else if (auto gc = dynamic_pointer_cast(f)) { @@ -172,7 +167,7 @@ HybridGaussianFactorGraph::collectProductFactor() const { } else if (auto gmf = dynamic_pointer_cast(f)) { result += *gmf; } else if (auto gm = dynamic_pointer_cast(f)) { - result += *gm; // handled above already? + result += *gm; // handled above already? 
} else if (auto hc = dynamic_pointer_cast<HybridConditional>(f)) {
      if (auto gm = hc->asHybrid()) {
        result += *gm;
@@ -198,11 +193,10 @@ HybridGaussianFactorGraph::collectProductFactor() const {
 }
 
 /* ************************************************************************ */
-static std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>>
-continuousElimination(const HybridGaussianFactorGraph &factors,
-                      const Ordering &frontalKeys) {
+static std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>> continuousElimination(
+    const HybridGaussianFactorGraph& factors, const Ordering& frontalKeys) {
   GaussianFactorGraph gfg;
-  for (auto &f : factors) {
+  for (auto& f : factors) {
     if (auto gf = dynamic_pointer_cast<GaussianFactor>(f)) {
       gfg.push_back(gf);
     } else if (auto orphan = dynamic_pointer_cast<OrphanWrapper>(f)) {
@@ -230,7 +224,7 @@ continuousElimination(const HybridGaussianFactorGraph &factors,
  * @return AlgebraicDecisionTree<Key>
  */
 static AlgebraicDecisionTree<Key> probabilitiesFromNegativeLogValues(
-    const AlgebraicDecisionTree<Key> &logValues) {
+    const AlgebraicDecisionTree<Key>& logValues) {
   // Perform normalization
   double min_log = logValues.min();
   AlgebraicDecisionTree<Key> probabilities = DecisionTree<Key, double>(
@@ -241,18 +235,17 @@ static AlgebraicDecisionTree<Key> probabilitiesFromNegativeLogValues(
 }
 
 /* ************************************************************************ */
-static std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>>
-discreteElimination(const HybridGaussianFactorGraph &factors,
-                    const Ordering &frontalKeys) {
+static std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>> discreteElimination(
+    const HybridGaussianFactorGraph& factors, const Ordering& frontalKeys) {
   DiscreteFactorGraph dfg;
 
-  for (auto &f : factors) {
+  for (auto& f : factors) {
     if (auto df = dynamic_pointer_cast<DiscreteFactor>(f)) {
       dfg.push_back(df);
     } else if (auto gmf = dynamic_pointer_cast<HybridGaussianFactor>(f)) {
       // Case where we have a HybridGaussianFactor with no continuous keys.
       // In this case, compute discrete probabilities.
-      auto logProbability = [&](const auto &pair) -> double {
+      auto logProbability = [&](const auto& pair) -> double {
         auto [factor, _] = pair;
         if (!factor) return 0.0;
         return factor->error(VectorValues());
       };
       AlgebraicDecisionTree<Key> logProbabilities =
           DecisionTree<Key, double>(gmf->factors(), logProbability);
 
       AlgebraicDecisionTree<Key> probabilities =
           probabilitiesFromNegativeLogValues(logProbabilities);
-      dfg.emplace_shared<DecisionTreeFactor>(gmf->discreteKeys(),
-                                             probabilities);
+      dfg.emplace_shared<DecisionTreeFactor>(gmf->discreteKeys(), probabilities);
 
     } else if (auto orphan = dynamic_pointer_cast<OrphanWrapper>(f)) {
       // Ignore orphaned clique.
@@ -284,8 +276,8 @@ discreteElimination(const HybridGaussianFactorGraph &factors,
 }
 
 /* ************************************************************************ */
-using Result = std::pair<std::shared_ptr<GaussianConditional>,
-                         HybridGaussianFactor::sharedFactor>;
+using Result = std::pair<std::shared_ptr<GaussianConditional>, GaussianFactor::shared_ptr>;
+using ResultTree = DecisionTree<Key, std::pair<Result, double>>;
 
 /**
 * Compute the probability p(μ;m) = exp(-error(μ;m)) * sqrt(det(2π Σ_m)).
 * The residual error contains no keys, and only
 * depends on the discrete separator if present.
 */
-static std::shared_ptr<Factor> createDiscreteFactor(
-    const DecisionTree<Key, Result> &eliminationResults,
-    const DiscreteKeys &discreteSeparator) {
-  auto negLogProbability = [&](const Result &pair) -> double {
-    const auto &[conditional, factor] = pair;
+static std::shared_ptr<Factor> createDiscreteFactor(const ResultTree& eliminationResults,
+                                                    const DiscreteKeys& discreteSeparator) {
+  auto negLogProbability = [&](const auto& pair) -> double {
+    const auto& [conditional, factor] = pair.first;
     if (conditional && factor) {
       static const VectorValues kEmpty;
       // If the factor is not null, it has no keys, just contains the residual.
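(Editorial sketch between hunks, not part of the patch: with the ResultTree
alias above, each leaf of the elimination result is a {{conditional, factor},
scalar} pair-of-pairs; a hypothetical consumer could index it per discrete
assignment as below.)

  const auto& [result, scalar] = eliminationResults(DiscreteValues{{M(0), 0}});
  const auto& [conditional, remainder] = result;  // eliminated conditional + remaining factor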
@@ -324,12 +315,11 @@ static std::shared_ptr createDiscreteFactor( // Create HybridGaussianFactor on the separator, taking care to correct // for conditional constants. -static std::shared_ptr createHybridGaussianFactor( - const DecisionTree &eliminationResults, - const DiscreteKeys &discreteSeparator) { +static std::shared_ptr createHybridGaussianFactor(const ResultTree& eliminationResults, + const DiscreteKeys& discreteSeparator) { // Correct for the normalization constant used up by the conditional - auto correct = [&](const Result &pair) -> GaussianFactorValuePair { - const auto &[conditional, factor] = pair; + auto correct = [&](const auto& pair) -> GaussianFactorValuePair { + const auto& [conditional, factor] = pair.first; if (conditional && factor) { auto hf = std::dynamic_pointer_cast(factor); if (!hf) throw std::runtime_error("Expected HessianFactor!"); @@ -339,29 +329,27 @@ static std::shared_ptr createHybridGaussianFactor( const double negLogK = conditional->negLogConstant(); hf->constantTerm() += -2.0 * negLogK; return {factor, negLogK}; - } else if (!conditional && !factor){ + } else if (!conditional && !factor) { return {nullptr, 0.0}; // TODO(frank): or should this be infinity? } else { - throw std::runtime_error("createHybridGaussianFactors has mixed NULLs"); + throw std::runtime_error("createHybridGaussianFactors has mixed NULLs"); } }; - DecisionTree newFactors(eliminationResults, - correct); + DecisionTree newFactors(eliminationResults, correct); return std::make_shared(discreteSeparator, newFactors); } /* *******************************************************************************/ /// Get the discrete keys from the HybridGaussianFactorGraph as DiscreteKeys. -static auto GetDiscreteKeys = - [](const HybridGaussianFactorGraph &hfg) -> DiscreteKeys { +static auto GetDiscreteKeys = [](const HybridGaussianFactorGraph& hfg) -> DiscreteKeys { const std::set discreteKeySet = hfg.discreteKeys(); return {discreteKeySet.begin(), discreteKeySet.end()}; }; /* *******************************************************************************/ std::pair> -HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { +HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { // Since we eliminate all continuous variables first, // the discrete separator will be *all* the discrete keys. DiscreteKeys discreteSeparator = GetDiscreteKeys(*this); @@ -377,9 +365,12 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // This is the elimination method on the leaf nodes bool someContinuousLeft = false; - auto eliminate = [&](const GaussianFactorGraph &graph) -> Result { + auto eliminate = + [&](const std::pair& pair) -> std::pair { + const auto& [graph, scalar] = pair; + if (graph.empty()) { - return {nullptr, nullptr}; + return {{nullptr, nullptr}, 0.0}; } // Expensive elimination of product factor. @@ -388,25 +379,24 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // Record whether there any continuous variables left someContinuousLeft |= !result.second->empty(); - return result; + return {result, scalar}; }; // Perform elimination! - DecisionTree eliminationResults(prunedProductFactor, eliminate); + ResultTree eliminationResults(prunedProductFactor, eliminate); // If there are no more continuous parents we create a DiscreteFactor with the // error for each discrete choice. Otherwise, create a HybridGaussianFactor // on the separator, taking care to correct for conditional constants. - auto newFactor = - someContinuousLeft - ? 
createHybridGaussianFactor(eliminationResults, discreteSeparator) - : createDiscreteFactor(eliminationResults, discreteSeparator); + auto newFactor = someContinuousLeft + ? createHybridGaussianFactor(eliminationResults, discreteSeparator) + : createDiscreteFactor(eliminationResults, discreteSeparator); // Create the HybridGaussianConditional from the conditionals HybridGaussianConditional::Conditionals conditionals( - eliminationResults, [](const Result &pair) { return pair.first; }); - auto hybridGaussian = std::make_shared( - discreteSeparator, conditionals); + eliminationResults, [](const auto& pair) { return pair.first.first; }); + auto hybridGaussian = + std::make_shared(discreteSeparator, conditionals); return {std::make_shared(hybridGaussian), newFactor}; } @@ -426,8 +416,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { * be INCORRECT and there will be NO error raised. */ std::pair> // -EliminateHybrid(const HybridGaussianFactorGraph &factors, - const Ordering &keys) { +EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys) { // NOTE: Because we are in the Conditional Gaussian regime there are only // a few cases: // 1. continuous variable, make a hybrid Gaussian conditional if there are @@ -478,7 +467,7 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, // 3. if not, we do hybrid elimination: bool only_discrete = true, only_continuous = true; - for (auto &&factor : factors) { + for (auto&& factor : factors) { if (auto hybrid_factor = std::dynamic_pointer_cast(factor)) { if (hybrid_factor->isDiscrete()) { only_continuous = false; @@ -489,11 +478,9 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, only_discrete = false; break; } - } else if (auto cont_factor = - std::dynamic_pointer_cast(factor)) { + } else if (auto cont_factor = std::dynamic_pointer_cast(factor)) { only_discrete = false; - } else if (auto discrete_factor = - std::dynamic_pointer_cast(factor)) { + } else if (auto discrete_factor = std::dynamic_pointer_cast(factor)) { only_continuous = false; } } @@ -514,10 +501,10 @@ EliminateHybrid(const HybridGaussianFactorGraph &factors, /* ************************************************************************ */ AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( - const VectorValues &continuousValues) const { + const VectorValues& continuousValues) const { AlgebraicDecisionTree result(0.0); // Iterate over each factor. - for (auto &factor : factors_) { + for (auto& factor : factors_) { if (auto hf = std::dynamic_pointer_cast(factor)) { // Add errorTree for hybrid factors, includes HybridGaussianConditionals! 
result = result + hf->errorTree(continuousValues); @@ -535,7 +522,7 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( } /* ************************************************************************ */ -double HybridGaussianFactorGraph::probPrime(const HybridValues &values) const { +double HybridGaussianFactorGraph::probPrime(const HybridValues& values) const { double error = this->error(values); // NOTE: The 0.5 term is handled by each factor return std::exp(-error); @@ -543,7 +530,7 @@ double HybridGaussianFactorGraph::probPrime(const HybridValues &values) const { /* ************************************************************************ */ AlgebraicDecisionTree HybridGaussianFactorGraph::discretePosterior( - const VectorValues &continuousValues) const { + const VectorValues& continuousValues) const { AlgebraicDecisionTree errors = this->errorTree(continuousValues); AlgebraicDecisionTree p = errors.apply([](double error) { // NOTE: The 0.5 term is handled by each factor @@ -553,10 +540,9 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::discretePosterior( } /* ************************************************************************ */ -GaussianFactorGraph HybridGaussianFactorGraph::choose( - const DiscreteValues &assignment) const { +GaussianFactorGraph HybridGaussianFactorGraph::choose(const DiscreteValues& assignment) const { GaussianFactorGraph gfg; - for (auto &&f : *this) { + for (auto&& f : *this) { if (auto gf = std::dynamic_pointer_cast(f)) { gfg.push_back(gf); } else if (auto gc = std::dynamic_pointer_cast(f)) { diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index c9b4c07dd..2430750d1 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -24,66 +24,64 @@ namespace gtsam { -static GaussianFactorGraph add(const GaussianFactorGraph &graph1, - const GaussianFactorGraph &graph2) { - auto result = graph1; - result.push_back(graph2); - return result; +using Y = HybridGaussianProductFactor::Y; + +static Y add(const Y& y1, const Y& y2) { + GaussianFactorGraph result = y1.first; + result.push_back(y2.first); + return {result, y1.second + y2.second}; }; -HybridGaussianProductFactor operator+(const HybridGaussianProductFactor &a, - const HybridGaussianProductFactor &b) { +HybridGaussianProductFactor operator+(const HybridGaussianProductFactor& a, + const HybridGaussianProductFactor& b) { return a.empty() ? 
b : HybridGaussianProductFactor(a.apply(b, add)); } HybridGaussianProductFactor HybridGaussianProductFactor::operator+( - const HybridGaussianFactor &factor) const { + const HybridGaussianFactor& factor) const { return *this + factor.asProductFactor(); } HybridGaussianProductFactor HybridGaussianProductFactor::operator+( - const GaussianFactor::shared_ptr &factor) const { + const GaussianFactor::shared_ptr& factor) const { return *this + HybridGaussianProductFactor(factor); } -HybridGaussianProductFactor &HybridGaussianProductFactor::operator+=( - const GaussianFactor::shared_ptr &factor) { +HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( + const GaussianFactor::shared_ptr& factor) { *this = *this + factor; return *this; } -HybridGaussianProductFactor & -HybridGaussianProductFactor::operator+=(const HybridGaussianFactor &factor) { +HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( + const HybridGaussianFactor& factor) { *this = *this + factor; return *this; } -void HybridGaussianProductFactor::print(const std::string &s, - const KeyFormatter &formatter) const { +void HybridGaussianProductFactor::print(const std::string& s, const KeyFormatter& formatter) const { KeySet keys; - auto printer = [&](const Y &graph) { - if (keys.size() == 0) - keys = graph.keys(); - return "Graph of size " + std::to_string(graph.size()); + auto printer = [&](const Y& y) { + if (keys.empty()) keys = y.first.keys(); + return "Graph of size " + std::to_string(y.first.size()) + + ", scalar sum: " + std::to_string(y.second); }; Base::print(s, formatter, printer); - if (keys.size() > 0) { + if (!keys.empty()) { std::stringstream ss; ss << s << " Keys:"; - for (auto &&key : keys) - ss << " " << formatter(key); + for (auto&& key : keys) ss << " " << formatter(key); std::cout << ss.str() << "." << std::endl; } } HybridGaussianProductFactor HybridGaussianProductFactor::removeEmpty() const { - auto emptyGaussian = [](const GaussianFactorGraph &graph) { - bool hasNull = - std::any_of(graph.begin(), graph.end(), - [](const GaussianFactor::shared_ptr &ptr) { return !ptr; }); - return hasNull ? GaussianFactorGraph() : graph; + auto emptyGaussian = [](const Y& y) { + bool hasNull = std::any_of( + y.first.begin(), y.first.end(), [](const GaussianFactor::shared_ptr& ptr) { return !ptr; }); + return hasNull ? 
Y{GaussianFactorGraph(), 0.0} : y; }; return {Base(*this, emptyGaussian)}; } -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index f1bd8bc3c..6d33daa1b 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -26,10 +26,11 @@ namespace gtsam { class HybridGaussianFactor; -/// Alias for DecisionTree of GaussianFactorGraphs -class HybridGaussianProductFactor : public DecisionTree { +/// Alias for DecisionTree of GaussianFactorGraphs and their scalar sums +class HybridGaussianProductFactor + : public DecisionTree> { public: - using Y = GaussianFactorGraph; + using Y = std::pair; using Base = DecisionTree; /// @name Constructors @@ -44,7 +45,8 @@ class HybridGaussianProductFactor : public DecisionTree - HybridGaussianProductFactor(const std::shared_ptr& factor) : Base(Y{factor}) {} + HybridGaussianProductFactor(const std::shared_ptr& factor) + : Base(Y{GaussianFactorGraph{factor}, 0.0}) {} /** * @brief Construct from DecisionTree @@ -88,7 +90,9 @@ class HybridGaussianProductFactor : public DecisionTree(X(0), Z_3x1, I_3x3, X(1), I_3x3), - std::make_shared(X(0), Vector3::Ones(), I_3x3, X(1), - I_3x3)}); + std::make_shared(X(0), Vector3::Ones(), I_3x3, X(1), I_3x3)}); hfg.add(gm); EXPECT_LONGS_EQUAL(2, hfg.size()); @@ -99,7 +99,7 @@ std::vector components(Key key) { return {std::make_shared(key, I_3x3, Z_3x1), std::make_shared(key, I_3x3, Vector3::Ones())}; } -} // namespace two +} // namespace two /* ************************************************************************* */ TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) { @@ -239,16 +239,16 @@ TEST(HybridGaussianFactorGraph, Conditionals) { Switching switching(4); HybridGaussianFactorGraph hfg; - hfg.push_back(switching.linearizedFactorGraph.at(0)); // P(X0) + hfg.push_back(switching.linearizedFactorGraph.at(0)); // P(X0) Ordering ordering; ordering.push_back(X(0)); HybridBayesNet::shared_ptr bayes_net = hfg.eliminateSequential(ordering); HybridGaussianFactorGraph hfg2; - hfg2.push_back(*bayes_net); // P(X0) - hfg2.push_back(switching.linearizedFactorGraph.at(1)); // P(X0, X1 | M0) - hfg2.push_back(switching.linearizedFactorGraph.at(2)); // P(X1, X2 | M1) - hfg2.push_back(switching.linearizedFactorGraph.at(5)); // P(M1) + hfg2.push_back(*bayes_net); // P(X0) + hfg2.push_back(switching.linearizedFactorGraph.at(1)); // P(X0, X1 | M0) + hfg2.push_back(switching.linearizedFactorGraph.at(2)); // P(X1, X2 | M1) + hfg2.push_back(switching.linearizedFactorGraph.at(5)); // P(M1) ordering += X(1), X(2), M(0), M(1); // Created product of first two factors and check eliminate: @@ -282,8 +282,7 @@ TEST(HybridGaussianFactorGraph, Conditionals) { expected_continuous.insert(X(1), 1); expected_continuous.insert(X(2), 2); expected_continuous.insert(X(3), 4); - Values result_continuous = - switching.linearizationPoint.retract(result.continuous()); + Values result_continuous = switching.linearizationPoint.retract(result.continuous()); EXPECT(assert_equal(expected_continuous, result_continuous)); DiscreteValues expected_discrete; @@ -318,7 +317,7 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) { // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x0;z0) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1) Switching s(3); - const HybridGaussianFactorGraph &graph = s.linearizedFactorGraph; + const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; const HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); 
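The identity behind these error and probPrime tests can be stated as a small sketch; `graph` and `hv` are hypothetical stand-ins for a hybrid factor graph and values:

#include <cassert>
#include <cmath>
#include <gtsam/hybrid/HybridGaussianFactorGraph.h>
#include <gtsam/hybrid/HybridValues.h>

// probPrime is the unnormalized posterior: the exponential of the negative
// total error, where each factor already handles its own 0.5 term
// (see the probPrime implementation above).
void checkProbPrime(const gtsam::HybridGaussianFactorGraph& graph,
                    const gtsam::HybridValues& hv) {
  const double error = graph.error(hv);
  assert(std::abs(graph.probPrime(hv) - std::exp(-error)) < 1e-9);
}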
@@ -376,19 +375,18 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { auto error_tree2 = graph.errorTree(delta.continuous()); // regression - leaves = {0.50985198, 0.0097577296, 0.50009425, 0, - 0.52922138, 0.029127133, 0.50985105, 0.0097567964}; + leaves = { + 0.50985198, 0.0097577296, 0.50009425, 0, 0.52922138, 0.029127133, 0.50985105, 0.0097567964}; AlgebraicDecisionTree expected_error2(s.modes, leaves); EXPECT(assert_equal(expected_error, error_tree, 1e-7)); } /* ****************************************************************************/ -// Check that assembleGraphTree assembles Gaussian factor graphs for each -// assignment. +// Check that collectProductFactor works correctly. TEST(HybridGaussianFactorGraph, collectProductFactor) { const int num_measurements = 1; - auto fg = tiny::createHybridGaussianFactorGraph( - num_measurements, VectorValues{{Z(0), Vector1(5.0)}}); + VectorValues vv{{Z(0), Vector1(5.0)}}; + auto fg = tiny::createHybridGaussianFactorGraph(num_measurements, vv); EXPECT_LONGS_EQUAL(3, fg.size()); // Assemble graph tree: @@ -411,23 +409,26 @@ TEST(HybridGaussianFactorGraph, collectProductFactor) { DiscreteValues d0{{M(0), 0}}, d1{{M(0), 1}}; // Expected decision tree with two factor graphs: - // f(x0;mode=0)P(x0) and f(x0;mode=1)P(x0) - HybridGaussianProductFactor expected{ - {M(0), GaussianFactorGraph(std::vector{(*hybrid)(d0), prior}), - GaussianFactorGraph(std::vector{(*hybrid)(d1), prior})}}; + // f(x0;mode=0)P(x0) + GaussianFactorGraph expectedFG0{(*hybrid)(d0), prior}; + EXPECT(assert_equal(expectedFG0, actual(d0).first, 1e-5)); + EXPECT(assert_equal(0.0, actual(d0).second, 1e-5)); - EXPECT(assert_equal(expected(d0), actual(d0), 1e-5)); - EXPECT(assert_equal(expected(d1), actual(d1), 1e-5)); + // f(x0;mode=1)P(x0) + GaussianFactorGraph expectedFG1{(*hybrid)(d1), prior}; + EXPECT(assert_equal(expectedFG1, actual(d1).first, 1e-5)); + EXPECT(assert_equal(1.79176, actual(d1).second, 1e-5)); } /* ****************************************************************************/ // Check that the factor graph unnormalized probability is proportional to the - // Bayes net probability for the given measurements. - bool - ratioTest(const HybridBayesNet &bn, const VectorValues &measurements, - const HybridGaussianFactorGraph &fg, size_t num_samples = 100) { - auto compute_ratio = [&](HybridValues *sample) -> double { - sample->update(measurements); // update sample with given measurements: +// Bayes net probability for the given measurements. +bool ratioTest(const HybridBayesNet& bn, + const VectorValues& measurements, + const HybridGaussianFactorGraph& fg, + size_t num_samples = 100) { + auto compute_ratio = [&](HybridValues* sample) -> double { + sample->update(measurements); // update sample with given measurements: return bn.evaluate(*sample) / fg.probPrime(*sample); }; @@ -437,8 +438,7 @@ TEST(HybridGaussianFactorGraph, collectProductFactor) { // Test ratios for a number of independent samples: for (size_t i = 0; i < num_samples; i++) { HybridValues sample = bn.sample(&kRng); - if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) - return false; + if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false; } return true; } @@ -446,10 +446,12 @@ TEST(HybridGaussianFactorGraph, collectProductFactor) { /* ****************************************************************************/ // Check that the bayes net unnormalized probability is proportional to the // Bayes net probability for the given measurements. 
-bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements, - const HybridBayesNet &posterior, size_t num_samples = 100) { - auto compute_ratio = [&](HybridValues *sample) -> double { - sample->update(measurements); // update sample with given measurements: +bool ratioTest(const HybridBayesNet& bn, + const VectorValues& measurements, + const HybridBayesNet& posterior, + size_t num_samples = 100) { + auto compute_ratio = [&](HybridValues* sample) -> double { + sample->update(measurements); // update sample with given measurements: return bn.evaluate(*sample) / posterior.evaluate(*sample); }; @@ -461,8 +463,7 @@ bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements, HybridValues sample = bn.sample(&kRng); // GTSAM_PRINT(sample); // std::cout << "ratio: " << compute_ratio(&sample) << std::endl; - if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) - return false; + if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false; } return true; } @@ -484,10 +485,10 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1) { // Create hybrid Gaussian factor on X(0). using tiny::mode; // regression, but mean checked to be 5.0 in both cases: - const auto conditional0 = std::make_shared( - X(0), Vector1(14.1421), I_1x1 * 2.82843), - conditional1 = std::make_shared( - X(0), Vector1(10.1379), I_1x1 * 2.02759); + const auto conditional0 = + std::make_shared(X(0), Vector1(14.1421), I_1x1 * 2.82843), + conditional1 = + std::make_shared(X(0), Vector1(10.1379), I_1x1 * 2.02759); expectedBayesNet.emplace_shared( mode, std::vector{conditional0, conditional1}); @@ -515,8 +516,7 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1Swapped) { bn.emplace_shared(m1, Z(0), I_1x1, X(0), parms); // Create prior on X(0). - bn.push_back( - GaussianConditional::sharedMeanAndStddev(X(0), Vector1(5.0), 0.5)); + bn.push_back(GaussianConditional::sharedMeanAndStddev(X(0), Vector1(5.0), 0.5)); // Add prior on m1. bn.emplace_shared(m1, "1/1"); @@ -534,10 +534,10 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1Swapped) { // Create hybrid Gaussian factor on X(0). // regression, but mean checked to be 5.0 in both cases: - const auto conditional0 = std::make_shared( - X(0), Vector1(10.1379), I_1x1 * 2.02759), - conditional1 = std::make_shared( - X(0), Vector1(14.1421), I_1x1 * 2.82843); + const auto conditional0 = + std::make_shared(X(0), Vector1(10.1379), I_1x1 * 2.02759), + conditional1 = + std::make_shared(X(0), Vector1(14.1421), I_1x1 * 2.82843); expectedBayesNet.emplace_shared( m1, std::vector{conditional0, conditional1}); @@ -570,10 +570,10 @@ TEST(HybridGaussianFactorGraph, EliminateTiny2) { // Create hybrid Gaussian factor on X(0). 
using tiny::mode; // regression, but mean checked to be 5.0 in both cases: - const auto conditional0 = std::make_shared( - X(0), Vector1(17.3205), I_1x1 * 3.4641), - conditional1 = std::make_shared( - X(0), Vector1(10.274), I_1x1 * 2.0548); + const auto conditional0 = + std::make_shared(X(0), Vector1(17.3205), I_1x1 * 3.4641), + conditional1 = + std::make_shared(X(0), Vector1(10.274), I_1x1 * 2.0548); expectedBayesNet.emplace_shared( mode, std::vector{conditional0, conditional1}); @@ -617,27 +617,25 @@ TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) { // NOTE: we add reverse topological so we can sample from the Bayes net.: // Add measurements: - std::vector> measurementModels{{Z_1x1, 3}, - {Z_1x1, 0.5}}; + std::vector> measurementModels{{Z_1x1, 3}, {Z_1x1, 0.5}}; for (size_t t : {0, 1, 2}) { // Create hybrid Gaussian factor on Z(t) conditioned on X(t) and mode N(t): const auto noise_mode_t = DiscreteKey{N(t), 2}; - bn.emplace_shared(noise_mode_t, Z(t), I_1x1, - X(t), measurementModels); + bn.emplace_shared( + noise_mode_t, Z(t), I_1x1, X(t), measurementModels); // Create prior on discrete mode N(t): bn.emplace_shared(noise_mode_t, "20/80"); } // Add motion models. TODO(frank): why are they exactly the same? - std::vector> motionModels{{Z_1x1, 0.2}, - {Z_1x1, 0.2}}; + std::vector> motionModels{{Z_1x1, 0.2}, {Z_1x1, 0.2}}; for (size_t t : {2, 1}) { // Create hybrid Gaussian factor on X(t) conditioned on X(t-1) // and mode M(t-1): const auto motion_model_t = DiscreteKey{M(t), 2}; - bn.emplace_shared(motion_model_t, X(t), I_1x1, - X(t - 1), motionModels); + bn.emplace_shared( + motion_model_t, X(t), I_1x1, X(t - 1), motionModels); // Create prior on motion model M(t): bn.emplace_shared(motion_model_t, "40/60"); @@ -650,8 +648,7 @@ TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) { EXPECT_LONGS_EQUAL(6, bn.sample().continuous().size()); // Create measurements consistent with moving right every time: - const VectorValues measurements{ - {Z(0), Vector1(0.0)}, {Z(1), Vector1(1.0)}, {Z(2), Vector1(2.0)}}; + const VectorValues measurements{{Z(0), Vector1(0.0)}, {Z(1), Vector1(1.0)}, {Z(2), Vector1(2.0)}}; const HybridGaussianFactorGraph fg = bn.toFactorGraph(measurements); // Factor graph is: diff --git a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp index 6d650246e..017df14a5 100644 --- a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp @@ -16,11 +16,11 @@ * @date October 2024 */ -#include "gtsam/inference/Key.h" #include #include #include #include +#include #include #include #include @@ -39,29 +39,27 @@ using symbol_shorthand::X; namespace examples { static const DiscreteKey m1(M(1), 2), m2(M(2), 3); -auto A1 = Matrix::Zero(2, 1); -auto A2 = Matrix::Zero(2, 2); -auto b = Matrix::Zero(2, 1); +const auto A1 = Matrix::Zero(2, 1); +const auto A2 = Matrix::Zero(2, 2); +const auto b = Matrix::Zero(2, 1); -auto f10 = std::make_shared(X(1), A1, X(2), A2, b); -auto f11 = std::make_shared(X(1), A1, X(2), A2, b); +const auto f10 = std::make_shared(X(1), A1, X(2), A2, b); +const auto f11 = std::make_shared(X(1), A1, X(2), A2, b); +const HybridGaussianFactor hybridFactorA(m1, {{f10, 10}, {f11, 11}}); -auto A3 = Matrix::Zero(2, 3); -auto f20 = std::make_shared(X(1), A1, X(3), A3, b); -auto f21 = std::make_shared(X(1), A1, X(3), A3, b); -auto f22 = std::make_shared(X(1), A1, X(3), A3, b); +const auto A3 = Matrix::Zero(2, 3); +const auto f20 = 
std::make_shared(X(1), A1, X(3), A3, b); +const auto f21 = std::make_shared(X(1), A1, X(3), A3, b); +const auto f22 = std::make_shared(X(1), A1, X(3), A3, b); -HybridGaussianFactor hybridFactorA(m1, {f10, f11}); -HybridGaussianFactor hybridFactorB(m2, {f20, f21, f22}); +const HybridGaussianFactor hybridFactorB(m2, {{f20, 20}, {f21, 21}, {f22, 22}}); // Simulate a pruned hybrid factor, in this case m2==1 is nulled out. -HybridGaussianFactor prunedFactorB(m2, {f20, nullptr, f22}); -} // namespace examples +const HybridGaussianFactor prunedFactorB(m2, {{f20, 20}, {nullptr, 1000}, {f22, 22}}); +} // namespace examples /* ************************************************************************* */ // Constructor -TEST(HybridGaussianProductFactor, Construct) { - HybridGaussianProductFactor product; -} +TEST(HybridGaussianProductFactor, Construct) { HybridGaussianProductFactor product; } /* ************************************************************************* */ // Add two Gaussian factors and check only one leaf in tree @@ -80,9 +78,10 @@ TEST(HybridGaussianProductFactor, AddTwoGaussianFactors) { auto leaf = product(Assignment()); // Check that the leaf contains both factors - EXPECT_LONGS_EQUAL(2, leaf.size()); - EXPECT(leaf.at(0) == f10); - EXPECT(leaf.at(1) == f11); + EXPECT_LONGS_EQUAL(2, leaf.first.size()); + EXPECT(leaf.first.at(0) == f10); + EXPECT(leaf.first.at(1) == f11); + EXPECT_DOUBLES_EQUAL(0, leaf.second, 1e-9); } /* ************************************************************************* */ @@ -107,9 +106,10 @@ TEST(HybridGaussianProductFactor, AddTwoGaussianConditionals) { auto leaf = product(Assignment()); // Check that the leaf contains both conditionals - EXPECT_LONGS_EQUAL(2, leaf.size()); - EXPECT(leaf.at(0) == gc1); - EXPECT(leaf.at(1) == gc2); + EXPECT_LONGS_EQUAL(2, leaf.first.size()); + EXPECT(leaf.first.at(0) == gc1); + EXPECT(leaf.first.at(1) == gc2); + EXPECT_DOUBLES_EQUAL(0, leaf.second, 1e-9); } /* ************************************************************************* */ @@ -120,9 +120,12 @@ TEST(HybridGaussianProductFactor, AsProductFactor) { // Let's check that this worked: Assignment mode; - mode[m1.first] = 1; + mode[m1.first] = 0; auto actual = product(mode); - EXPECT(actual.at(0) == f11); + EXPECT(actual.first.at(0) == f10); + EXPECT_DOUBLES_EQUAL(10, actual.second, 1e-9); + + // TODO(Frank): when killed hiding, f11 should also be there } /* ************************************************************************* */ @@ -134,9 +137,12 @@ TEST(HybridGaussianProductFactor, AddOne) { // Let's check that this worked: Assignment mode; - mode[m1.first] = 1; + mode[m1.first] = 0; auto actual = product(mode); - EXPECT(actual.at(0) == f11); + EXPECT(actual.first.at(0) == f10); + EXPECT_DOUBLES_EQUAL(10, actual.second, 1e-9); + + // TODO(Frank): when killed hiding, f11 should also be there } /* ************************************************************************* */ @@ -152,12 +158,15 @@ TEST(HybridGaussianProductFactor, AddTwo) { // Let's check that this worked: auto actual00 = product({{M(1), 0}, {M(2), 0}}); - EXPECT(actual00.at(0) == f10); - EXPECT(actual00.at(1) == f20); + EXPECT(actual00.first.at(0) == f10); + EXPECT(actual00.first.at(1) == f20); + EXPECT_DOUBLES_EQUAL(10 + 20, actual00.second, 1e-9); auto actual12 = product({{M(1), 1}, {M(2), 2}}); - EXPECT(actual12.at(0) == f11); - EXPECT(actual12.at(1) == f22); + // TODO(Frank): when killed hiding, these should also equal: + // EXPECT(actual12.first.at(0) == f11); + // EXPECT(actual12.first.at(1) == 
f22);
+  EXPECT_DOUBLES_EQUAL(11 + 22, actual12.second, 1e-9);
 }

/* ************************************************************************* */

From 310c4ffae9af35278d31f992be79060097ea61fb Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sun, 6 Oct 2024 11:07:02 -0400
Subject: [PATCH 047/204] update pruning comment

---
 gtsam/hybrid/HybridBayesNet.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/hybrid/HybridBayesNet.cpp b/gtsam/hybrid/HybridBayesNet.cpp
index 36503d2ea..f57cda28d 100644
--- a/gtsam/hybrid/HybridBayesNet.cpp
+++ b/gtsam/hybrid/HybridBayesNet.cpp
@@ -73,7 +73,7 @@ HybridBayesNet HybridBayesNet::prune(size_t maxNrLeaves) const {
   // per pruned Discrete joint.
   for (auto &&conditional : *this) {
     if (auto hgc = conditional->asHybrid()) {
-      // Make a copy of the hybrid Gaussian conditional and prune it!
+      // Prune the hybrid Gaussian conditional!
       auto prunedHybridGaussianConditional = hgc->prune(pruned);

       // Type-erase and add to the pruned Bayes Net fragment.

From 94fda5dd5a97969974d9ed381afa4713b2e4f7f5 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sun, 6 Oct 2024 11:08:02 -0400
Subject: [PATCH 048/204] take descriptive comment all the way

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 7dfa56e77..5b26a623a 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -305,6 +305,9 @@ static std::shared_ptr<Factor> createDiscreteFactor(
     // Negative logspace version of:
     // exp(-factor->error(kEmpty)) / conditional->normalizationConstant();
+    // = exp(-factor->error(kEmpty)) * \sqrt{|2πΣ|};
+    // -log = -(-factor->error(kEmpty) + log(\sqrt{|2πΣ|}))
+    //      = factor->error(kEmpty) - log(\sqrt{|2πΣ|})
     // negLogConstant gives `-log(k)`
     // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`.
     return factor->error(kEmpty) - conditional->negLogConstant();

From 873b2a714204da27f8d7cc79a453b698d94f4891 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sun, 6 Oct 2024 11:15:46 -0400
Subject: [PATCH 049/204] fix return value of pruned factors

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 5b26a623a..d4fb5833b 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -240,7 +240,9 @@ discreteElimination(const HybridGaussianFactorGraph &factors,
       // In this case, compute discrete probabilities.
       auto logProbability =
           [&](const GaussianFactor::shared_ptr &factor) -> double {
-        if (!factor) return 0.0;
+        // If the factor is null, it has been pruned, hence return ∞
+        // so that the exp(-∞)=0.
+ if (!factor) return std::numeric_limits::infinity(); // Negative logspace version of: // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); From e1c0d0e2275d95f65ccf5b6c7dadf6ba2379f17b Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 7 Oct 2024 09:12:56 +0900 Subject: [PATCH 050/204] operator returns pairs, extensive switching test --- gtsam/hybrid/HybridGaussianConditional.cpp | 8 +- gtsam/hybrid/HybridGaussianFactor.cpp | 4 +- gtsam/hybrid/HybridGaussianFactor.h | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 12 +- gtsam/hybrid/tests/testHybridBayesNet.cpp | 108 ++++++++-------- .../tests/testHybridGaussianConditional.cpp | 2 +- .../tests/testHybridGaussianFactorGraph.cpp | 119 +++++++++++++++++- 7 files changed, 180 insertions(+), 75 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index a6dcc6624..ba2c34414 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -273,13 +273,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( [&](const GaussianConditional::shared_ptr& conditional) -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_; - if (Cgm_Kgcm == 0.0) { - return {likelihood_m, 0.0}; - } else { - // Add a constant to the likelihood in case the noise models - // are not all equal. - return {likelihood_m, Cgm_Kgcm}; - } + return {likelihood_m, Cgm_Kgcm}; }); return std::make_shared(discreteParentKeys, likelihoods); } diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index d49630b64..e01df539e 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -187,9 +187,9 @@ void HybridGaussianFactor::print(const std::string& s, const KeyFormatter& forma } /* *******************************************************************************/ -HybridGaussianFactor::sharedFactor HybridGaussianFactor::operator()( +GaussianFactorValuePair HybridGaussianFactor::operator()( const DiscreteValues& assignment) const { - return factors_(assignment).first; + return factors_(assignment); } /* *******************************************************************************/ diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index bb4d3b368..08debcf48 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -129,7 +129,7 @@ public: /// @{ /// Get factor at a given discrete assignment. - sharedFactor operator()(const DiscreteValues &assignment) const; + GaussianFactorValuePair operator()(const DiscreteValues &assignment) const; /** * @brief Compute error of the HybridGaussianFactor as a tree. 
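A minimal usage sketch for the new pair-valued call operator; `hgf` and `vv` are hypothetical inputs, and the scalar's role in the assignment-specific error follows the Switching test added below:

#include <limits>
#include <gtsam/hybrid/HybridGaussianFactor.h>
#include <gtsam/inference/Symbol.h>

using gtsam::symbol_shorthand::M;

double modeError(const gtsam::HybridGaussianFactor& hgf, const gtsam::VectorValues& vv) {
  const gtsam::DiscreteValues assignment{{M(0), 0}};
  // Indexing by a discrete assignment now yields both the Gaussian factor and
  // its mode-dependent scalar (e.g. the noise model's negLogConstant).
  const auto [factor, scalar] = hgf(assignment);
  if (!factor)  // a null factor means this mode was pruned away,
    return std::numeric_limits<double>::infinity();  // so exp(-∞)=0
  return factor->error(vv) + scalar;  // error specific to this assignment
}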
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index b94905652..2d4ff38bd 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -81,7 +81,7 @@ static void printFactor(const std::shared_ptr& factor, if (assignment.empty()) hgf->print("HybridGaussianFactor:", keyFormatter); else - hgf->operator()(assignment)->print("HybridGaussianFactor, component:", keyFormatter); + hgf->operator()(assignment).first->print("HybridGaussianFactor, component:", keyFormatter); } else if (auto gf = std::dynamic_pointer_cast(factor)) { factor->print("GaussianFactor:\n", keyFormatter); @@ -329,6 +329,7 @@ static std::shared_ptr createHybridGaussianFactor(const ResultTree& elim const double negLogK = conditional->negLogConstant(); hf->constantTerm() += -2.0 * negLogK; return {factor, negLogK}; + return {factor, scalar + negLogK}; } else if (!conditional && !factor) { return {nullptr, 0.0}; // TODO(frank): or should this be infinity? } else { @@ -355,7 +356,9 @@ HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { DiscreteKeys discreteSeparator = GetDiscreteKeys(*this); // Collect all the factors to create a set of Gaussian factor graphs in a - // decision tree indexed by all discrete keys involved. + // decision tree indexed by all discrete keys involved. Just like any hybrid factor, every + // assignment also has a scalar error, in this case the sum of all errors in the graph. This error + // is assignment-specific and accounts for any difference in noise models used. HybridGaussianProductFactor productFactor = collectProductFactor(); // Convert factor graphs with a nullptr to an empty factor graph. @@ -374,11 +377,12 @@ HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { } // Expensive elimination of product factor. - auto result = EliminatePreferCholesky(graph, keys); + auto result = EliminatePreferCholesky(graph, keys); /// <<<<<< MOST COMPUTE IS HERE // Record whether there any continuous variables left someContinuousLeft |= !result.second->empty(); + // We pass on the scalar unmodified. return {result, scalar}; }; @@ -548,7 +552,7 @@ GaussianFactorGraph HybridGaussianFactorGraph::choose(const DiscreteValues& assi } else if (auto gc = std::dynamic_pointer_cast(f)) { gfg.push_back(gf); } else if (auto hgf = std::dynamic_pointer_cast(f)) { - gfg.push_back((*hgf)(assignment)); + gfg.push_back((*hgf)(assignment).first); } else if (auto hgc = std::dynamic_pointer_cast(f)) { gfg.push_back((*hgc)(assignment)); } else if (auto hc = std::dynamic_pointer_cast(f)) { diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 1d22b3d73..438bfd267 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -18,9 +18,12 @@ * @date December 2021 */ +#include +#include #include #include #include +#include #include #include "Switching.h" @@ -28,6 +31,7 @@ // Include for test suite #include +#include using namespace std; using namespace gtsam; @@ -113,7 +117,7 @@ TEST(HybridBayesNet, EvaluatePureDiscrete) { } /* ****************************************************************************/ -// Test creation of a tiny hybrid Bayes net. +// Test API for a tiny hybrid Bayes net. 
TEST(HybridBayesNet, Tiny) { auto bayesNet = tiny::createHybridBayesNet(); // P(z|x,mode)P(x)P(mode) EXPECT_LONGS_EQUAL(3, bayesNet.size()); @@ -164,10 +168,8 @@ TEST(HybridBayesNet, Tiny) { EXPECT(!pruned.equals(bayesNet)); // error - const double error0 = chosen0.error(vv) + gc0->negLogConstant() - - px->negLogConstant() - log(0.4); - const double error1 = chosen1.error(vv) + gc1->negLogConstant() - - px->negLogConstant() - log(0.6); + const double error0 = chosen0.error(vv) + gc0->negLogConstant() - px->negLogConstant() - log(0.4); + const double error1 = chosen1.error(vv) + gc1->negLogConstant() - px->negLogConstant() - log(0.6); // print errors: EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); @@ -188,6 +190,23 @@ TEST(HybridBayesNet, Tiny) { // toFactorGraph auto fg = bayesNet.toFactorGraph({{Z(0), Vector1(5.0)}}); EXPECT_LONGS_EQUAL(3, fg.size()); + GTSAM_PRINT(fg); + + // Create the product factor for eliminating x0: + HybridGaussianFactorGraph factors_x0; + factors_x0.push_back(fg.at(0)); + factors_x0.push_back(fg.at(1)); + auto productFactor = factors_x0.collectProductFactor(); + + // Check that scalars are 0 and 1.79 + EXPECT_DOUBLES_EQUAL(0.0, productFactor({{M(0), 0}}).second, 1e-9); + EXPECT_DOUBLES_EQUAL(1.791759, productFactor({{M(0), 1}}).second, 1e-5); + + // Call eliminate and check scalar: + auto result = factors_x0.eliminate({X(0)}); + GTSAM_PRINT(*result.first); + auto df = std::dynamic_pointer_cast(result.second); + GTSAM_PRINT(df->errorTree()); // Check that the ratio of probPrime to evaluate is the same for all modes. std::vector ratio(2); @@ -209,17 +228,13 @@ TEST(HybridBayesNet, Tiny) { /* ****************************************************************************/ // Hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). namespace different_sigmas { -const auto gc = GaussianConditional::sharedMeanAndStddev(X(0), 2 * I_1x1, X(1), - Vector1(-4.0), 5.0); +const auto gc = GaussianConditional::sharedMeanAndStddev(X(0), 2 * I_1x1, X(1), Vector1(-4.0), 5.0); -const std::vector> parms{{Vector1(5), 2.0}, - {Vector1(2), 3.0}}; +const std::vector> parms{{Vector1(5), 2.0}, {Vector1(2), 3.0}}; const auto hgc = std::make_shared(Asia, X(1), parms); const auto prior = std::make_shared(Asia, "99/1"); -auto wrap = [](const auto& c) { - return std::make_shared(c); -}; +auto wrap = [](const auto& c) { return std::make_shared(c); }; const HybridBayesNet bayesNet{wrap(gc), wrap(hgc), wrap(prior)}; // Create values at which to evaluate. 
@@ -233,8 +248,8 @@ TEST(HybridBayesNet, evaluateHybrid) { const double conditionalProbability = gc->evaluate(values.continuous()); const double mixtureProbability = hgc->evaluate(values); - EXPECT_DOUBLES_EQUAL(conditionalProbability * mixtureProbability * 0.99, - bayesNet.evaluate(values), 1e-9); + EXPECT_DOUBLES_EQUAL( + conditionalProbability * mixtureProbability * 0.99, bayesNet.evaluate(values), 1e-9); } /* ****************************************************************************/ @@ -256,14 +271,10 @@ TEST(HybridBayesNet, Choose) { EXPECT_LONGS_EQUAL(4, gbn.size()); - EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asHybrid())(assignment), - *gbn.at(0))); - EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asHybrid())(assignment), - *gbn.at(1))); - EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asHybrid())(assignment), - *gbn.at(2))); - EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asHybrid())(assignment), - *gbn.at(3))); + EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asHybrid())(assignment), *gbn.at(0))); + EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asHybrid())(assignment), *gbn.at(1))); + EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asHybrid())(assignment), *gbn.at(2))); + EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asHybrid())(assignment), *gbn.at(3))); } /* ****************************************************************************/ @@ -300,8 +311,7 @@ TEST(HybridBayesNet, OptimizeAssignment) { TEST(HybridBayesNet, Optimize) { Switching s(4, 1.0, 0.1, {0, 1, 2, 3}, "1/1 1/1"); - HybridBayesNet::shared_ptr hybridBayesNet = - s.linearizedFactorGraph.eliminateSequential(); + HybridBayesNet::shared_ptr hybridBayesNet = s.linearizedFactorGraph.eliminateSequential(); HybridValues delta = hybridBayesNet->optimize(); @@ -328,8 +338,7 @@ TEST(HybridBayesNet, Pruning) { // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x0;z0) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1) Switching s(3); - HybridBayesNet::shared_ptr posterior = - s.linearizedFactorGraph.eliminateSequential(); + HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential(); EXPECT_LONGS_EQUAL(5, posterior->size()); // Optimize @@ -355,12 +364,9 @@ TEST(HybridBayesNet, Pruning) { logProbability += posterior->at(0)->asHybrid()->logProbability(hybridValues); logProbability += posterior->at(1)->asHybrid()->logProbability(hybridValues); logProbability += posterior->at(2)->asHybrid()->logProbability(hybridValues); - logProbability += - posterior->at(3)->asDiscrete()->logProbability(hybridValues); - logProbability += - posterior->at(4)->asDiscrete()->logProbability(hybridValues); - EXPECT_DOUBLES_EQUAL(logProbability, posterior->logProbability(hybridValues), - 1e-9); + logProbability += posterior->at(3)->asDiscrete()->logProbability(hybridValues); + logProbability += posterior->at(4)->asDiscrete()->logProbability(hybridValues); + EXPECT_DOUBLES_EQUAL(logProbability, posterior->logProbability(hybridValues), 1e-9); // Check agreement with discrete posterior // double density = exp(logProbability); @@ -381,8 +387,7 @@ TEST(HybridBayesNet, Pruning) { TEST(HybridBayesNet, Prune) { Switching s(4); - HybridBayesNet::shared_ptr posterior = - s.linearizedFactorGraph.eliminateSequential(); + HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential(); EXPECT_LONGS_EQUAL(7, posterior->size()); HybridValues delta = posterior->optimize(); @@ -399,8 +404,7 @@ TEST(HybridBayesNet, Prune) { TEST(HybridBayesNet, UpdateDiscreteConditionals) { Switching s(4); - HybridBayesNet::shared_ptr posterior = - 
s.linearizedFactorGraph.eliminateSequential(); + HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential(); EXPECT_LONGS_EQUAL(7, posterior->size()); DiscreteConditional joint; @@ -412,8 +416,7 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { auto prunedDecisionTree = joint.prune(maxNrLeaves); #ifdef GTSAM_DT_MERGING - EXPECT_LONGS_EQUAL(maxNrLeaves + 2 /*2 zero leaves*/, - prunedDecisionTree.nrLeaves()); + EXPECT_LONGS_EQUAL(maxNrLeaves + 2 /*2 zero leaves*/, prunedDecisionTree.nrLeaves()); #else EXPECT_LONGS_EQUAL(8 /*full tree*/, prunedDecisionTree.nrLeaves()); #endif @@ -421,16 +424,14 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { // regression // NOTE(Frank): I had to include *three* non-zeroes here now. DecisionTreeFactor::ADT potentials( - s.modes, - std::vector{0, 0, 0, 0.28739288, 0, 0.43106901, 0, 0.2815381}); + s.modes, std::vector{0, 0, 0, 0.28739288, 0, 0.43106901, 0, 0.2815381}); DiscreteConditional expectedConditional(3, s.modes, potentials); // Prune! auto pruned = posterior->prune(maxNrLeaves); // Functor to verify values against the expectedConditional - auto checker = [&](const Assignment& assignment, - double probability) -> double { + auto checker = [&](const Assignment& assignment, double probability) -> double { // typecast so we can use this to get probability value DiscreteValues choices(assignment); if (prunedDecisionTree(choices) == 0) { @@ -445,8 +446,7 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) { CHECK(pruned.at(0)->asDiscrete()); auto pruned_discrete_conditionals = pruned.at(0)->asDiscrete(); auto discrete_conditional_tree = - std::dynamic_pointer_cast( - pruned_discrete_conditionals); + std::dynamic_pointer_cast(pruned_discrete_conditionals); // The checker functor verifies the values for us. 
discrete_conditional_tree->apply(checker); @@ -460,13 +460,10 @@ TEST(HybridBayesNet, Sampling) { auto noise_model = noiseModel::Diagonal::Sigmas(Vector1(1.0)); nfg.emplace_shared>(X(0), 0.0, noise_model); - auto zero_motion = - std::make_shared>(X(0), X(1), 0, noise_model); - auto one_motion = - std::make_shared>(X(0), X(1), 1, noise_model); + auto zero_motion = std::make_shared>(X(0), X(1), 0, noise_model); + auto one_motion = std::make_shared>(X(0), X(1), 1, noise_model); nfg.emplace_shared( - DiscreteKey(M(0), 2), - std::vector{zero_motion, one_motion}); + DiscreteKey(M(0), 2), std::vector{zero_motion, one_motion}); DiscreteKey mode(M(0), 2); nfg.emplace_shared(mode, "1/1"); @@ -538,18 +535,17 @@ TEST(HybridBayesNet, ErrorTreeWithConditional) { hbn.emplace_shared(x0, Vector1(0.0), I_1x1, prior_model); // Add measurement P(z0 | x0) - hbn.emplace_shared(z0, Vector1(0.0), -I_1x1, x0, I_1x1, - measurement_model); + hbn.emplace_shared(z0, Vector1(0.0), -I_1x1, x0, I_1x1, measurement_model); // Add hybrid motion model double mu = 0.0; double sigma0 = 1e2, sigma1 = 1e-2; auto model0 = noiseModel::Isotropic::Sigma(1, sigma0); auto model1 = noiseModel::Isotropic::Sigma(1, sigma1); - auto c0 = make_shared(f01, Vector1(mu), I_1x1, x1, I_1x1, - x0, -I_1x1, model0), - c1 = make_shared(f01, Vector1(mu), I_1x1, x1, I_1x1, - x0, -I_1x1, model1); + auto c0 = + make_shared(f01, Vector1(mu), I_1x1, x1, I_1x1, x0, -I_1x1, model0), + c1 = + make_shared(f01, Vector1(mu), I_1x1, x1, I_1x1, x0, -I_1x1, model1); DiscreteKey m1(M(2), 2); hbn.emplace_shared(m1, std::vector{c0, c1}); diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp index cd9c182cd..05f7c6c61 100644 --- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -217,7 +217,7 @@ TEST(HybridGaussianConditional, Likelihood2) { // Check the detailed JacobianFactor calculation for mode==1. { // We have a JacobianFactor - const auto gf1 = (*likelihood)(assignment1); + const auto [gf1, _] = (*likelihood)(assignment1); const auto jf1 = std::dynamic_pointer_cast(gf1); CHECK(jf1); diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index ed444c13c..caf62616f 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -165,8 +165,119 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { EXPECT_LONGS_EQUAL(4, result->size()); } -/* -****************************************************************************/ +/* ************************************************************************* */ +// Test API for the smallest switching network. +// None of these are regression tests. 
+TEST(HybridBayesNet, Switching) { + const double betweenSigma = 0.3, priorSigma = 0.1; + Switching s(2, betweenSigma, priorSigma); + const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; + EXPECT_LONGS_EQUAL(4, graph.size()); + + // Create some continuous and discrete values + VectorValues continuousValues{{X(0), Vector1(0.1)}, {X(1), Vector1(1.2)}}; + DiscreteValues modeZero{{M(0), 0}}, modeOne{{M(0), 1}}; + + // Get the hybrid gaussian factor and check it is as expected + auto hgf = std::dynamic_pointer_cast(graph.at(1)); + CHECK(hgf); + + // Get factors and scalars for both modes + auto [factor0, scalar0] = (*hgf)(modeZero); + auto [factor1, scalar1] = (*hgf)(modeOne); + CHECK(factor0); + CHECK(factor1); + + // Check scalars against negLogConstant of noise model + auto betweenModel = noiseModel::Isotropic::Sigma(1, betweenSigma); + EXPECT_DOUBLES_EQUAL(betweenModel->negLogConstant(), scalar0, 1e-9); + EXPECT_DOUBLES_EQUAL(betweenModel->negLogConstant(), scalar1, 1e-9); + + // Check error for M(0) = 0 + HybridValues values0{continuousValues, modeZero}; + double expectedError0 = 0; + for (const auto& factor : graph) expectedError0 += factor->error(values0); + EXPECT_DOUBLES_EQUAL(expectedError0, graph.error(values0), 1e-5); + + // Check error for M(0) = 1 + HybridValues values1{continuousValues, modeOne}; + double expectedError1 = 0; + for (const auto& factor : graph) expectedError1 += factor->error(values1); + EXPECT_DOUBLES_EQUAL(expectedError1, graph.error(values1), 1e-5); + + // Check errorTree + AlgebraicDecisionTree actualErrors = graph.errorTree(continuousValues); + // Create expected error tree + AlgebraicDecisionTree expectedErrors(M(0), expectedError0, expectedError1); + + // Check that the actual error tree matches the expected one + EXPECT(assert_equal(expectedErrors, actualErrors, 1e-5)); + + // Check probPrime + double probPrime0 = graph.probPrime(values0); + EXPECT_DOUBLES_EQUAL(std::exp(-expectedError0), probPrime0, 1e-5); + + double probPrime1 = graph.probPrime(values1); + EXPECT_DOUBLES_EQUAL(std::exp(-expectedError1), probPrime1, 1e-5); + + // Check discretePosterior + AlgebraicDecisionTree posterior = graph.discretePosterior(continuousValues); + double sum = probPrime0 + probPrime1; + AlgebraicDecisionTree expectedPosterior(M(0), probPrime0 / sum, probPrime1 / sum); + EXPECT(assert_equal(expectedPosterior, posterior, 1e-5)); + + // Make the clique of factors connected to x0: + HybridGaussianFactorGraph factors_x0; + factors_x0.push_back(graph.at(0)); + factors_x0.push_back(hgf); + + // Test collectProductFactor + auto productFactor = factors_x0.collectProductFactor(); + + // For M(0) = 0 + auto [gaussianFactor0, actualScalar0] = productFactor(modeZero); + EXPECT(gaussianFactor0.size() == 2); + EXPECT_DOUBLES_EQUAL((*hgf)(modeZero).second, actualScalar0, 1e-5); + + // For M(0) = 1 + auto [gaussianFactor1, actualScalar1] = productFactor(modeOne); + EXPECT(gaussianFactor1.size() == 2); + EXPECT_DOUBLES_EQUAL((*hgf)(modeOne).second, actualScalar1, 1e-5); + + // Test eliminate + Ordering ordering{X(0)}; + auto [conditional, factor] = factors_x0.eliminate(ordering); + + // Check the conditional + CHECK(conditional); + EXPECT(conditional->isHybrid()); + auto hybridConditional = conditional->asHybrid(); + CHECK(hybridConditional); + EXPECT_LONGS_EQUAL(1, hybridConditional->nrFrontals()); // x0 + EXPECT_LONGS_EQUAL(2, hybridConditional->nrParents()); // x1, m0 + + // Check the remaining factor + EXPECT(factor); + EXPECT(std::dynamic_pointer_cast(factor)); + auto 
hybridFactor = std::dynamic_pointer_cast<HybridGaussianFactor>(factor);
+  EXPECT_LONGS_EQUAL(2, hybridFactor->keys().size());  // x1, m0
+
+  // Check that the conditional and remaining factor are consistent for both modes
+  for (auto&& mode : {modeZero, modeOne}) {
+    auto gc = (*hybridConditional)(mode);
+    auto gf = (*hybridFactor)(mode);
+
+    // The error of the original factors should equal the sum of errors of the conditional and
+    // remaining factor, modulo the normalization constant of the conditional.
+    double originalError = factors_x0.error({continuousValues, mode});
+    EXPECT_DOUBLES_EQUAL(
+        originalError,
+        gc->negLogConstant() + gc->error(continuousValues) + gf.first->error(continuousValues),
+        1e-9);
+  }
+}
+
+/* ************************************************************************* */
 // Select a particular continuous factor graph given a discrete assignment
 TEST(HybridGaussianFactorGraph, DiscreteSelection) {
   Switching s(3);

From 04cfb063aec39c026a38f1cb254627fdeb5e8b7f Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Mon, 7 Oct 2024 09:19:35 +0900
Subject: [PATCH 051/204] Cherry-pick Varun's bugfix

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 2d4ff38bd..38995b60a 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -247,7 +247,8 @@ static std::pair<HybridConditional::shared_ptr, std::shared_ptr<Factor>> discret
       // In this case, compute discrete probabilities.
       auto logProbability = [&](const auto& pair) -> double {
         auto [factor, _] = pair;
-        if (!factor) return 0.0;
+        // If the factor is null, it has been pruned, hence return ∞
+        // so that the exp(-∞)=0.
         return factor->error(VectorValues());
       };
       AlgebraicDecisionTree<Key> logProbabilities =
@@ -299,7 +300,9 @@ static std::shared_ptr<Factor> createDiscreteFactor(const ResultTree& eliminatio
       // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`.
       return factor->error(kEmpty) - conditional->negLogConstant();
     } else if (!conditional && !factor) {
-      return 1.0;  // TODO(dellaert): not loving this, what should this be??
+      // If the factor is null, it has been pruned, hence return ∞
+      // so that the exp(-∞)=0.
+ return std::numeric_limits::infinity(); } else { throw std::runtime_error("createDiscreteFactor has mixed NULLs"); } From b3c698047d93c6ba9f11eafd05d70ff6992a63dd Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 7 Oct 2024 17:08:38 +0900 Subject: [PATCH 052/204] Don't normalize probabilities for a mere DiscreteFactor --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 70 ++++++++-------------- 1 file changed, 24 insertions(+), 46 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 38995b60a..95acb6ad6 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -53,9 +53,12 @@ namespace gtsam { /// Specialize EliminateableFactorGraph for HybridGaussianFactorGraph: template class EliminateableFactorGraph; -using OrphanWrapper = BayesTreeOrphanWrapper; - using std::dynamic_pointer_cast; +using OrphanWrapper = BayesTreeOrphanWrapper; +using Result = std::pair, GaussianFactor::shared_ptr>; +using ResultTree = DecisionTree>; + +static const VectorValues kEmpty; /* ************************************************************************ */ // Throw a runtime exception for method specified in string s, and factor f: @@ -215,25 +218,6 @@ static std::pair> continu return {std::make_shared(result.first), result.second}; } -/* ************************************************************************ */ -/** - * @brief Exponentiate (not necessarily normalized) negative log-values, - * normalize, and then return as AlgebraicDecisionTree. - * - * @param logValues DecisionTree of (unnormalized) log values. - * @return AlgebraicDecisionTree - */ -static AlgebraicDecisionTree probabilitiesFromNegativeLogValues( - const AlgebraicDecisionTree& logValues) { - // Perform normalization - double min_log = logValues.min(); - AlgebraicDecisionTree probabilities = DecisionTree( - logValues, [&min_log](const double x) { return exp(-(x - min_log)); }); - probabilities = probabilities.normalize(probabilities.sum()); - - return probabilities; -} - /* ************************************************************************ */ static std::pair> discreteElimination( const HybridGaussianFactorGraph& factors, const Ordering& frontalKeys) { @@ -245,18 +229,17 @@ static std::pair> discret } else if (auto gmf = dynamic_pointer_cast(f)) { // Case where we have a HybridGaussianFactor with no continuous keys. // In this case, compute discrete probabilities. - auto logProbability = [&](const auto& pair) -> double { + // TODO(frank): What about the scalar!? + auto potential = [&](const auto& pair) -> double { auto [factor, _] = pair; - // If the factor is null, it is has been pruned hence return ∞ - // so that the exp(-∞)=0. - return factor->error(VectorValues()); + // If the factor is null, it has been pruned, hence return potential of zero + if (!factor) + return 0; + else + return exp(-factor->error(kEmpty)); }; - AlgebraicDecisionTree logProbabilities = - DecisionTree(gmf->factors(), logProbability); - - AlgebraicDecisionTree probabilities = - probabilitiesFromNegativeLogValues(logProbabilities); - dfg.emplace_shared(gmf->discreteKeys(), probabilities); + DecisionTree potentials(gmf->factors(), potential); + dfg.emplace_shared(gmf->discreteKeys(), potentials); } else if (auto orphan = dynamic_pointer_cast(f)) { // Ignore orphaned clique. 
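The pruning-aware potential used above can also be read as a standalone helper; a sketch only, with a hypothetical name:

#include <cmath>
#include <gtsam/linear/GaussianFactor.h>
#include <gtsam/linear/VectorValues.h>

// A pruned (null) factor contributes zero potential; otherwise we return the
// unnormalized likelihood exp(-error), evaluated at the empty VectorValues
// since these factors have no continuous keys left.
double potentialOf(const gtsam::GaussianFactor::shared_ptr& factor) {
  static const gtsam::VectorValues kEmpty;
  return factor ? std::exp(-factor->error(kEmpty)) : 0.0;
}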
@@ -277,9 +260,6 @@ static std::pair> discret } /* ************************************************************************ */ -using Result = std::pair, GaussianFactor::shared_ptr>; -using ResultTree = DecisionTree>; - /** * Compute the probability p(μ;m) = exp(-error(μ;m)) * sqrt(det(2π Σ_m) * from the residual error ||b||^2 at the mean μ. @@ -288,34 +268,31 @@ using ResultTree = DecisionTree>; */ static std::shared_ptr createDiscreteFactor(const ResultTree& eliminationResults, const DiscreteKeys& discreteSeparator) { - auto negLogProbability = [&](const auto& pair) -> double { + auto potential = [&](const auto& pair) -> double { const auto& [conditional, factor] = pair.first; if (conditional && factor) { - static const VectorValues kEmpty; // If the factor is not null, it has no keys, just contains the residual. // Negative-log-space version of: // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); // negLogConstant gives `-log(k)` // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. - return factor->error(kEmpty) - conditional->negLogConstant(); + const double negLogK = conditional->negLogConstant(); + const double old = factor->error(kEmpty) - negLogK; + return exp(-old); } else if (!conditional && !factor) { - // If the factor is null, it has been pruned, hence return ∞ - // so that the exp(-∞)=0. - return std::numeric_limits::infinity(); + // If the factor is null, it has been pruned, hence return potential of zero + return 0; } else { throw std::runtime_error("createDiscreteFactor has mixed NULLs"); } }; - AlgebraicDecisionTree negLogProbabilities( - DecisionTree(eliminationResults, negLogProbability)); - AlgebraicDecisionTree probabilities = - probabilitiesFromNegativeLogValues(negLogProbabilities); - - return std::make_shared(discreteSeparator, probabilities); + DecisionTree potentials(eliminationResults, potential); + return std::make_shared(discreteSeparator, potentials); } +/* *******************************************************************************/ // Create HybridGaussianFactor on the separator, taking care to correct // for conditional constants. 
static std::shared_ptr createHybridGaussianFactor(const ResultTree& eliminationResults, @@ -323,6 +300,7 @@ static std::shared_ptr createHybridGaussianFactor(const ResultTree& elim // Correct for the normalization constant used up by the conditional auto correct = [&](const auto& pair) -> GaussianFactorValuePair { const auto& [conditional, factor] = pair.first; + const double scalar = pair.second; if (conditional && factor) { auto hf = std::dynamic_pointer_cast(factor); if (!hf) throw std::runtime_error("Expected HessianFactor!"); From 586b177b7873acff7c2449adbe21fcae0f556a2a Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 7 Oct 2024 22:28:38 +0900 Subject: [PATCH 053/204] Extensive new API test --- .../tests/testHybridGaussianFactorGraph.cpp | 97 +++++++++++++++---- 1 file changed, 80 insertions(+), 17 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index caf62616f..1bed2533a 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -117,7 +117,8 @@ TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) { // Check that factor is discrete and correct auto factor = std::dynamic_pointer_cast(result.second); CHECK(factor); - EXPECT(assert_equal(DecisionTreeFactor{m1, "1 1"}, *factor)); + // regression test + EXPECT(assert_equal(DecisionTreeFactor{m1, "15.74961 15.74961"}, *factor, 1e-5)); } /* ************************************************************************* */ @@ -221,10 +222,10 @@ TEST(HybridBayesNet, Switching) { EXPECT_DOUBLES_EQUAL(std::exp(-expectedError1), probPrime1, 1e-5); // Check discretePosterior - AlgebraicDecisionTree posterior = graph.discretePosterior(continuousValues); + AlgebraicDecisionTree graphPosterior = graph.discretePosterior(continuousValues); double sum = probPrime0 + probPrime1; AlgebraicDecisionTree expectedPosterior(M(0), probPrime0 / sum, probPrime1 / sum); - EXPECT(assert_equal(expectedPosterior, posterior, 1e-5)); + EXPECT(assert_equal(expectedPosterior, graphPosterior, 1e-5)); // Make the clique of factors connected to x0: HybridGaussianFactorGraph factors_x0; @@ -244,37 +245,100 @@ TEST(HybridBayesNet, Switching) { EXPECT(gaussianFactor1.size() == 2); EXPECT_DOUBLES_EQUAL((*hgf)(modeOne).second, actualScalar1, 1e-5); - // Test eliminate + // Test eliminate x0 Ordering ordering{X(0)}; auto [conditional, factor] = factors_x0.eliminate(ordering); // Check the conditional CHECK(conditional); EXPECT(conditional->isHybrid()); - auto hybridConditional = conditional->asHybrid(); - CHECK(hybridConditional); - EXPECT_LONGS_EQUAL(1, hybridConditional->nrFrontals()); // x0 - EXPECT_LONGS_EQUAL(2, hybridConditional->nrParents()); // x1, m0 + auto p_x0_given_x1_m = conditional->asHybrid(); + CHECK(p_x0_given_x1_m); + EXPECT_LONGS_EQUAL(1, p_x0_given_x1_m->nrFrontals()); // x0 + EXPECT_LONGS_EQUAL(2, p_x0_given_x1_m->nrParents()); // x1, m0 // Check the remaining factor EXPECT(factor); EXPECT(std::dynamic_pointer_cast(factor)); - auto hybridFactor = std::dynamic_pointer_cast(factor); - EXPECT_LONGS_EQUAL(2, hybridFactor->keys().size()); // x1, m0 + auto phi_x1_m = std::dynamic_pointer_cast(factor); + EXPECT_LONGS_EQUAL(2, phi_x1_m->keys().size()); // x1, m0 + // Check that the scalars incorporate the negative log constant of the conditional + EXPECT_DOUBLES_EQUAL( + scalar0 - (*p_x0_given_x1_m)(modeZero)->negLogConstant(), (*phi_x1_m)(modeZero).second, 1e-9); + EXPECT_DOUBLES_EQUAL( + scalar1 - 
(*p_x0_given_x1_m)(modeOne)->negLogConstant(), (*phi_x1_m)(modeOne).second, 1e-9);
 
   // Check that the conditional and remaining factor are consistent for both modes
   for (auto&& mode : {modeZero, modeOne}) {
-    auto gc = (*hybridConditional)(mode);
-    auto gf = (*hybridFactor)(mode);
+    auto gc = (*p_x0_given_x1_m)(mode);
+    auto [gf, scalar] = (*phi_x1_m)(mode);
 
     // The error of the original factors should equal the sum of errors of the conditional and
     // remaining factor, modulo the normalization constant of the conditional.
     double originalError = factors_x0.error({continuousValues, mode});
-    EXPECT_DOUBLES_EQUAL(
-        originalError,
-        gc->negLogConstant() + gc->error(continuousValues) + gf.first->error(continuousValues),
-        1e-9);
+    const double actualError =
+        gc->negLogConstant() + gc->error(continuousValues) + gf->error(continuousValues) + scalar;
+    EXPECT_DOUBLES_EQUAL(originalError, actualError, 1e-9);
   }
+
+  // Create a clique for x1
+  HybridGaussianFactorGraph factors_x1;
+  factors_x1.push_back(factor);       // Use the remaining factor from previous elimination
+  factors_x1.push_back(graph.at(2));  // Add the factor for x1 from the original graph
+
+  // Test collectProductFactor for x1 clique
+  auto productFactor_x1 = factors_x1.collectProductFactor();
+
+  // For M(0) = 0
+  auto [gaussianFactor_x1_0, actualScalar_x1_0] = productFactor_x1(modeZero);
+  EXPECT_LONGS_EQUAL(2, gaussianFactor_x1_0.size());
+  // NOTE(Frank): prior on x1 does not contribute to the scalar
+  EXPECT_DOUBLES_EQUAL((*phi_x1_m)(modeZero).second, actualScalar_x1_0, 1e-5);
+
+  // For M(0) = 1
+  auto [gaussianFactor_x1_1, actualScalar_x1_1] = productFactor_x1(modeOne);
+  EXPECT_LONGS_EQUAL(2, gaussianFactor_x1_1.size());
+  // NOTE(Frank): prior on x1 does not contribute to the scalar
+  EXPECT_DOUBLES_EQUAL((*phi_x1_m)(modeOne).second, actualScalar_x1_1, 1e-5);
+
+  // Test eliminate for x1 clique
+  Ordering ordering_x1{X(1)};
+  auto [conditional_x1, factor_x1] = factors_x1.eliminate(ordering_x1);
+
+  // Check the conditional for x1
+  CHECK(conditional_x1);
+  EXPECT(conditional_x1->isHybrid());
+  auto p_x1_given_m = conditional_x1->asHybrid();
+  CHECK(p_x1_given_m);
+  EXPECT_LONGS_EQUAL(1, p_x1_given_m->nrFrontals());  // x1
+  EXPECT_LONGS_EQUAL(1, p_x1_given_m->nrParents());   // m0
+
+  // Check the remaining factor for x1
+  CHECK(factor_x1);
+  auto phi_x1 = std::dynamic_pointer_cast(factor_x1);
+  CHECK(phi_x1);
+  EXPECT_LONGS_EQUAL(1, phi_x1->keys().size());  // m0
+  // We can't really check the error of the decision tree factor phi_x1, because the continuous
+  // factor whose error(kEmpty) we need is not available.
+
+  // However, we can still check that the total error of the clique factors_x1 and the elimination
+  // results agree, modulo (again) the negative log constant of the conditional.
+  for (auto&& mode : {modeZero, modeOne}) {
+    auto gc_x1 = (*p_x1_given_m)(mode);
+    double originalError_x1 = factors_x1.error({continuousValues, mode});
+    const double actualError =
+        gc_x1->negLogConstant() + gc_x1->error(continuousValues) + phi_x1->error(mode);
+    EXPECT_DOUBLES_EQUAL(originalError_x1, actualError, 1e-9);
+  }
+
+  // Now test full elimination of the graph:
+  auto posterior = graph.eliminateSequential();
+  CHECK(posterior);
+
+  // Check that the posterior P(M|X=continuousValues) from the Bayes net is the same as the
+  // same posterior from the graph. This is a sanity check that the elimination is done correctly.
+ AlgebraicDecisionTree bnPosterior = graph.discretePosterior(continuousValues); + EXPECT(assert_equal(graphPosterior, bnPosterior)); } /* ************************************************************************* */ @@ -572,7 +636,6 @@ bool ratioTest(const HybridBayesNet& bn, // Test ratios for a number of independent samples: for (size_t i = 0; i < num_samples; i++) { HybridValues sample = bn.sample(&kRng); - // GTSAM_PRINT(sample); // std::cout << "ratio: " << compute_ratio(&sample) << std::endl; if (std::abs(expected_ratio - compute_ratio(&sample)) > 1e-6) return false; } From 518ea81d1ef7c0071ac8380723783a55753cedd4 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 7 Oct 2024 22:30:30 +0900 Subject: [PATCH 054/204] No more hiding! --- gtsam/hybrid/HybridGaussianFactor.cpp | 72 +++------------------- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 18 ++---- 2 files changed, 15 insertions(+), 75 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index e01df539e..44a56910e 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -31,45 +31,6 @@ namespace gtsam { -/* *******************************************************************************/ -HybridGaussianFactor::FactorValuePairs HybridGaussianFactor::augment( - const FactorValuePairs& factors) { - // Find the minimum value so we can "proselytize" to positive values. - // Done because we can't have sqrt of negative numbers. - DecisionTree gaussianFactors; - AlgebraicDecisionTree valueTree; - std::tie(gaussianFactors, valueTree) = unzip(factors); - - // Compute minimum value for normalization. - double min_value = valueTree.min(); - - // Finally, update the [A|b] matrices. - auto update = [&min_value](const auto& gfv) -> GaussianFactorValuePair { - auto [gf, value] = gfv; - - auto jf = std::dynamic_pointer_cast(gf); - if (!jf) return {gf, 0.0}; // should this be zero or infinite? - - double normalized_value = value - min_value; - - // If the value is 0, do nothing - if (normalized_value == 0.0) return {gf, value}; - - GaussianFactorGraph gfg; - gfg.push_back(jf); - - Vector c(1); - // When hiding c inside the `b` vector, value == 0.5*c^2 - c << std::sqrt(2.0 * normalized_value); - auto constantFactor = std::make_shared(c); - - gfg.push_back(constantFactor); - // NOTE(Frank): we store the actual value, not the normalized value: - return {std::make_shared(gfg), value}; - }; - return FactorValuePairs(factors, update); -} - /* *******************************************************************************/ struct HybridGaussianFactor::ConstructorHelper { KeyVector continuousKeys; // Continuous keys extracted from factors @@ -88,10 +49,10 @@ struct HybridGaussianFactor::ConstructorHelper { } } // Build the FactorValuePairs DecisionTree - pairs = FactorValuePairs(DecisionTree(discreteKeys, factors), - [](const auto& f) { - return std::pair{f, 0.0}; - }); + pairs = FactorValuePairs( + DecisionTree(discreteKeys, factors), [](const auto& f) { + return std::pair{f, f ? 
0.0 : std::numeric_limits::infinity()}; + }); } /// Constructor for a single discrete key and a vector of GaussianFactorValuePairs @@ -128,7 +89,7 @@ struct HybridGaussianFactor::ConstructorHelper { /* *******************************************************************************/ HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper& helper) - : Base(helper.continuousKeys, helper.discreteKeys), factors_(augment(helper.pairs)) {} + : Base(helper.continuousKeys, helper.discreteKeys), factors_(helper.pairs) {} HybridGaussianFactor::HybridGaussianFactor( const DiscreteKey& discreteKey, const std::vector& factorPairs) @@ -187,8 +148,7 @@ void HybridGaussianFactor::print(const std::string& s, const KeyFormatter& forma } /* *******************************************************************************/ -GaussianFactorValuePair HybridGaussianFactor::operator()( - const DiscreteValues& assignment) const { +GaussianFactorValuePair HybridGaussianFactor::operator()(const DiscreteValues& assignment) const { return factors_(assignment); } @@ -202,26 +162,13 @@ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { }}}; } -/* *******************************************************************************/ -/// Helper method to compute the error of a component. -static double PotentiallyPrunedComponentError(const GaussianFactor::shared_ptr& gf, - const VectorValues& values) { - // Check if valid pointer - if (gf) { - return gf->error(values); - } else { - // If nullptr this component was pruned, so we return maximum error. This - // way the negative exponential will give a probability value close to 0.0. - return std::numeric_limits::max(); - } -} - /* *******************************************************************************/ AlgebraicDecisionTree HybridGaussianFactor::errorTree( const VectorValues& continuousValues) const { // functor to convert from sharedFactor to double error value. auto errorFunc = [&continuousValues](const auto& pair) { - return PotentiallyPrunedComponentError(pair.first, continuousValues); + return pair.first ? pair.first->error(continuousValues) + pair.second + : std::numeric_limits::infinity(); }; DecisionTree error_tree(factors_, errorFunc); return error_tree; @@ -231,7 +178,8 @@ AlgebraicDecisionTree HybridGaussianFactor::errorTree( double HybridGaussianFactor::error(const HybridValues& values) const { // Directly index to get the component, no need to build the whole tree. const auto pair = factors_(values.discrete()); - return PotentiallyPrunedComponentError(pair.first, values.continuous()); + return pair.first ? pair.first->error(values.continuous()) + pair.second + : std::numeric_limits::infinity(); } } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 95acb6ad6..c2d84fdb8 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -154,8 +154,6 @@ void HybridGaussianFactorGraph::printErrors( } /* ************************************************************************ */ -// TODO(dellaert): it's probably more efficient to first collect the discrete -// keys, and then loop over all assignments to populate a vector. 
HybridGaussianProductFactor HybridGaussianFactorGraph::collectProductFactor() const { HybridGaussianProductFactor result; @@ -270,6 +268,7 @@ static std::shared_ptr createDiscreteFactor(const ResultTree& eliminatio const DiscreteKeys& discreteSeparator) { auto potential = [&](const auto& pair) -> double { const auto& [conditional, factor] = pair.first; + const double scalar = pair.second; if (conditional && factor) { // If the factor is not null, it has no keys, just contains the residual. @@ -278,8 +277,8 @@ static std::shared_ptr createDiscreteFactor(const ResultTree& eliminatio // negLogConstant gives `-log(k)` // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. const double negLogK = conditional->negLogConstant(); - const double old = factor->error(kEmpty) - negLogK; - return exp(-old); + const double error = scalar + factor->error(kEmpty) - negLogK; + return exp(-error); } else if (!conditional && !factor) { // If the factor is null, it has been pruned, hence return potential of zero return 0; @@ -302,17 +301,10 @@ static std::shared_ptr createHybridGaussianFactor(const ResultTree& elim const auto& [conditional, factor] = pair.first; const double scalar = pair.second; if (conditional && factor) { - auto hf = std::dynamic_pointer_cast(factor); - if (!hf) throw std::runtime_error("Expected HessianFactor!"); - // Add 2.0 term since the constant term will be premultiplied by 0.5 - // as per the Hessian definition, - // and negative since we want log(k) const double negLogK = conditional->negLogConstant(); - hf->constantTerm() += -2.0 * negLogK; - return {factor, negLogK}; - return {factor, scalar + negLogK}; + return {factor, scalar - negLogK}; } else if (!conditional && !factor) { - return {nullptr, 0.0}; // TODO(frank): or should this be infinity? + return {nullptr, std::numeric_limits::infinity()}; } else { throw std::runtime_error("createHybridGaussianFactors has mixed NULLs"); } From 42052d07a106181239ef7eb30837a0c31573c175 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 7 Oct 2024 11:13:52 -0400 Subject: [PATCH 055/204] additional fix --- gtsam/discrete/DecisionTreeFactor.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index d1b68f4bf..0a453ad4a 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -385,6 +385,16 @@ namespace gtsam { // Now threshold the decision tree size_t total = 0; auto thresholdFunc = [threshold, &total, N](const double& value) { + // There is a possible case where the `threshold` is equal to 0.0 + // In that case `(value < threshold) == false` + // which increases the leaf total erroneously. + // Hence we check for 0.0 explicitly. + if (fpEqual(value, 0.0, 1e-12)) { + return 0.0; + } + + // Check if value is less than the threshold and + // we haven't exceeded the maximum number of leaves. 
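        // A small worked example (illustrative only, not from the patch): with
        // leaf values {0.0, 0.2, 0.3, 0.5}, N = 2 and threshold = 0.3, the
        // branch below keeps {0.3, 0.5} and zeroes the rest. Without the
        // fpEqual check above, threshold = 0.0 would send the 0.0 leaves into
        // the else branch (`0.0 < 0.0` is false), counting them against N.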
if (value < threshold || total >= N) { return 0.0; } else { From 0f403c7d28427e131e95dd2a89ae2e676b683521 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 7 Oct 2024 11:54:21 -0400 Subject: [PATCH 056/204] clean up testHybridEstimation --- gtsam/hybrid/tests/testHybridEstimation.cpp | 104 +++++--------------- 1 file changed, 23 insertions(+), 81 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 88d8be0bc..3bb659fee 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -47,6 +47,14 @@ using namespace gtsam; using symbol_shorthand::X; using symbol_shorthand::Z; +namespace estimation_fixture { +std::vector measurements = {0, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, + 7, 8, 9, 9, 9, 10, 11, 11, 11, 11}; +// Ground truth discrete seq +std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0, + 1, 1, 1, 0, 0, 1, 1, 0, 0, 0}; +} // namespace estimation_fixture + TEST(HybridEstimation, Full) { size_t K = 6; std::vector measurements = {0, 1, 2, 2, 2, 3}; @@ -90,12 +98,10 @@ TEST(HybridEstimation, Full) { /****************************************************************************/ // Test approximate inference with an additional pruning step. TEST(HybridEstimation, IncrementalSmoother) { + using namespace estimation_fixture; + size_t K = 15; - std::vector measurements = {0, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, - 7, 8, 9, 9, 9, 10, 11, 11, 11, 11}; - // Ground truth discrete seq - std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0, - 1, 1, 1, 0, 0, 1, 1, 0, 0, 0}; + // Switching example of robot moving in 1D // with given measurements and equal mode priors. Switching switching(K, 1.0, 0.1, measurements, "1/1 1/1"); @@ -123,10 +129,6 @@ TEST(HybridEstimation, IncrementalSmoother) { smoother.update(linearized, maxNrLeaves, ordering); graph.resize(0); - - // Uncomment to print out pruned discrete marginal: - // smoother.hybridBayesNet().at(0)->asDiscrete()->dot("smoother_" + - // std::to_string(k)); } HybridValues delta = smoother.hybridBayesNet().optimize(); @@ -149,12 +151,10 @@ TEST(HybridEstimation, IncrementalSmoother) { /****************************************************************************/ // Test approximate inference with an additional pruning step. TEST(HybridEstimation, ISAM) { + using namespace estimation_fixture; + size_t K = 15; - std::vector measurements = {0, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, - 7, 8, 9, 9, 9, 10, 11, 11, 11, 11}; - // Ground truth discrete seq - std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0, - 1, 1, 1, 0, 0, 1, 1, 0, 0, 0}; + // Switching example of robot moving in 1D // with given measurements and equal mode priors. Switching switching(K, 1.0, 0.1, measurements, "1/1 1/1"); @@ -179,7 +179,6 @@ TEST(HybridEstimation, ISAM) { initial.insert(X(k), switching.linearizationPoint.at(X(k))); isam.update(graph, initial, 3); - // isam.bayesTree().print("\n\n"); graph.resize(0); initial.clear(); @@ -201,65 +200,6 @@ TEST(HybridEstimation, ISAM) { EXPECT(assert_equal(expected_continuous, result)); } -/** - * @brief A function to get a specific 1D robot motion problem as a linearized - * factor graph. This is the problem P(X|Z, M), i.e. estimating the continuous - * positions given the measurements and discrete sequence. - * - * @param K The number of timesteps. - * @param measurements The vector of measurements for each timestep. - * @param discrete_seq The discrete sequence governing the motion of the robot. 
- * @param measurement_sigma Noise model sigma for measurements. - * @param between_sigma Noise model sigma for the between factor. - * @return GaussianFactorGraph::shared_ptr - */ -GaussianFactorGraph::shared_ptr specificModesFactorGraph( - size_t K, const std::vector& measurements, - const std::vector& discrete_seq, double measurement_sigma = 0.1, - double between_sigma = 1.0) { - NonlinearFactorGraph graph; - Values linearizationPoint; - - // Add measurement factors - auto measurement_noise = noiseModel::Isotropic::Sigma(1, measurement_sigma); - for (size_t k = 0; k < K; k++) { - graph.emplace_shared>(X(k), measurements.at(k), - measurement_noise); - linearizationPoint.insert(X(k), static_cast(k + 1)); - } - - using MotionModel = BetweenFactor; - - // Add "motion models". - auto motion_noise_model = noiseModel::Isotropic::Sigma(1, between_sigma); - for (size_t k = 0; k < K - 1; k++) { - auto motion_model = std::make_shared( - X(k), X(k + 1), discrete_seq.at(k), motion_noise_model); - graph.push_back(motion_model); - } - GaussianFactorGraph::shared_ptr linear_graph = - graph.linearize(linearizationPoint); - return linear_graph; -} - -/** - * @brief Get the discrete sequence from the integer `x`. - * - * @tparam K Template parameter so we can set the correct bitset size. - * @param x The integer to convert to a discrete binary sequence. - * @return std::vector - */ -template -std::vector getDiscreteSequence(size_t x) { - std::bitset seq = x; - std::vector discrete_seq(K - 1); - for (size_t i = 0; i < K - 1; i++) { - // Save to discrete vector in reverse order - discrete_seq[K - 2 - i] = seq[i]; - } - return discrete_seq; -} - /** * @brief Helper method to get the tree of * unnormalized probabilities as per the elimination scheme. @@ -270,7 +210,7 @@ std::vector getDiscreteSequence(size_t x) { * @param graph The HybridGaussianFactorGraph to eliminate. * @return AlgebraicDecisionTree */ -AlgebraicDecisionTree getProbPrimeTree( +AlgebraicDecisionTree GetProbPrimeTree( const HybridGaussianFactorGraph& graph) { Ordering continuous(graph.continuousKeySet()); const auto [bayesNet, remainingGraph] = @@ -316,8 +256,9 @@ AlgebraicDecisionTree getProbPrimeTree( * The values should match those of P'(Continuous) for each discrete mode. ********************************************************************************/ TEST(HybridEstimation, Probability) { + using namespace estimation_fixture; + constexpr size_t K = 4; - std::vector measurements = {0, 1, 2, 2}; double between_sigma = 1.0, measurement_sigma = 0.1; // Switching example of robot moving in 1D with @@ -358,8 +299,9 @@ TEST(HybridEstimation, Probability) { * for each discrete mode. */ TEST(HybridEstimation, ProbabilityMultifrontal) { + using namespace estimation_fixture; + constexpr size_t K = 4; - std::vector measurements = {0, 1, 2, 2}; double between_sigma = 1.0, measurement_sigma = 0.1; @@ -370,7 +312,7 @@ TEST(HybridEstimation, ProbabilityMultifrontal) { auto graph = switching.linearizedFactorGraph; // Get the tree of unnormalized probabilities for each mode sequence. 
- AlgebraicDecisionTree expected_probPrimeTree = getProbPrimeTree(graph); + AlgebraicDecisionTree expected_probPrimeTree = GetProbPrimeTree(graph); // Eliminate continuous Ordering continuous_ordering(graph.continuousKeySet()); @@ -451,7 +393,7 @@ static HybridNonlinearFactorGraph createHybridNonlinearFactorGraph() { /********************************************************************************* // Create a hybrid linear factor graph f(x0, x1, m0; z0, z1) ********************************************************************************/ -static HybridGaussianFactorGraph::shared_ptr createHybridGaussianFactorGraph() { +static HybridGaussianFactorGraph::shared_ptr CreateHybridGaussianFactorGraph() { HybridNonlinearFactorGraph nfg = createHybridNonlinearFactorGraph(); Values initial; @@ -466,7 +408,7 @@ static HybridGaussianFactorGraph::shared_ptr createHybridGaussianFactorGraph() { ********************************************************************************/ TEST(HybridEstimation, eliminateSequentialRegression) { // Create the factor graph from the nonlinear factor graph. - HybridGaussianFactorGraph::shared_ptr fg = createHybridGaussianFactorGraph(); + HybridGaussianFactorGraph::shared_ptr fg = CreateHybridGaussianFactorGraph(); // Create expected discrete conditional on m0. DiscreteKey m(M(0), 2); @@ -501,7 +443,7 @@ TEST(HybridEstimation, eliminateSequentialRegression) { ********************************************************************************/ TEST(HybridEstimation, CorrectnessViaSampling) { // 1. Create the factor graph from the nonlinear factor graph. - const auto fg = createHybridGaussianFactorGraph(); + const auto fg = CreateHybridGaussianFactorGraph(); // 2. Eliminate into BN const HybridBayesNet::shared_ptr bn = fg->eliminateSequential(); From 5ee27bfad18226bcfc30ea1e87a03d548b685ce5 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 7 Oct 2024 12:05:58 -0400 Subject: [PATCH 057/204] more test improvements --- gtsam/hybrid/tests/testHybridEstimation.cpp | 42 ++++++++++++--------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 3bb659fee..cefb599c5 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -53,6 +53,22 @@ std::vector measurements = {0, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, // Ground truth discrete seq std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0}; + +Switching InitializeEstimationProblem( + const size_t K, const double between_sigma, const double measurement_sigma, + const std::vector& measurements, + const std::string& discrete_transition_prob, + HybridNonlinearFactorGraph& graph, Values& initial) { + Switching switching(K, between_sigma, measurement_sigma, measurements, + discrete_transition_prob); + + // Add the X(0) prior + graph.push_back(switching.nonlinearFactorGraph.at(0)); + initial.insert(X(0), switching.linearizationPoint.at(X(0))); + + return switching; +} + } // namespace estimation_fixture TEST(HybridEstimation, Full) { @@ -104,14 +120,11 @@ TEST(HybridEstimation, IncrementalSmoother) { // Switching example of robot moving in 1D // with given measurements and equal mode priors. 
- Switching switching(K, 1.0, 0.1, measurements, "1/1 1/1"); - HybridSmoother smoother; HybridNonlinearFactorGraph graph; Values initial; - - // Add the X(0) prior - graph.push_back(switching.nonlinearFactorGraph.at(0)); - initial.insert(X(0), switching.linearizationPoint.at(X(0))); + Switching switching = InitializeEstimationProblem(K, 1.0, 0.1, measurements, + "1/1 1/1", graph, initial); + HybridSmoother smoother; HybridGaussianFactorGraph linearized; @@ -157,16 +170,11 @@ TEST(HybridEstimation, ISAM) { // Switching example of robot moving in 1D // with given measurements and equal mode priors. - Switching switching(K, 1.0, 0.1, measurements, "1/1 1/1"); - HybridNonlinearISAM isam; HybridNonlinearFactorGraph graph; Values initial; - - // gttic_(Estimation); - - // Add the X(0) prior - graph.push_back(switching.nonlinearFactorGraph.at(0)); - initial.insert(X(0), switching.linearizationPoint.at(X(0))); + Switching switching = InitializeEstimationProblem(K, 1.0, 0.1, measurements, + "1/1 1/1", graph, initial); + HybridNonlinearISAM isam; HybridGaussianFactorGraph linearized; @@ -367,7 +375,7 @@ TEST(HybridEstimation, ProbabilityMultifrontal) { /********************************************************************************* // Create a hybrid nonlinear factor graph f(x0, x1, m0; z0, z1) ********************************************************************************/ -static HybridNonlinearFactorGraph createHybridNonlinearFactorGraph() { +static HybridNonlinearFactorGraph CreateHybridNonlinearFactorGraph() { HybridNonlinearFactorGraph nfg; constexpr double sigma = 0.5; // measurement noise @@ -394,7 +402,7 @@ static HybridNonlinearFactorGraph createHybridNonlinearFactorGraph() { // Create a hybrid linear factor graph f(x0, x1, m0; z0, z1) ********************************************************************************/ static HybridGaussianFactorGraph::shared_ptr CreateHybridGaussianFactorGraph() { - HybridNonlinearFactorGraph nfg = createHybridNonlinearFactorGraph(); + HybridNonlinearFactorGraph nfg = CreateHybridNonlinearFactorGraph(); Values initial; double z0 = 0.0, z1 = 1.0; @@ -406,7 +414,7 @@ static HybridGaussianFactorGraph::shared_ptr CreateHybridGaussianFactorGraph() { /********************************************************************************* * Do hybrid elimination and do regression test on discrete conditional. ********************************************************************************/ -TEST(HybridEstimation, eliminateSequentialRegression) { +TEST(HybridEstimation, EliminateSequentialRegression) { // Create the factor graph from the nonlinear factor graph. HybridGaussianFactorGraph::shared_ptr fg = CreateHybridGaussianFactorGraph(); From ec32d5f197f0857a20c90df4ee7828c7a2130796 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 7 Oct 2024 12:06:14 -0400 Subject: [PATCH 058/204] add test for checking pruning --- gtsam/hybrid/tests/testHybridEstimation.cpp | 49 +++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index cefb599c5..32d7277a2 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -161,6 +161,55 @@ TEST(HybridEstimation, IncrementalSmoother) { EXPECT(assert_equal(expected_continuous, result)); } +/****************************************************************************/ +// Test if pruned factor is set to correct error and no errors are thrown. 
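// The test below relies on the convention, used throughout this series, that a
// pruned (nullptr) component carries infinite error, as in this sketch:
//
//   double error = pruned ? std::numeric_limits<double>::infinity()
//                         : gf->error(values) + scalar;  // potential = exp(-error)
//
// so pruned modes get potential zero and the smoother runs without throwing.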
+TEST(HybridEstimation, ValidPruningError) { + using namespace estimation_fixture; + + size_t K = 8; + + HybridNonlinearFactorGraph graph; + Values initial; + Switching switching = InitializeEstimationProblem(K, 1e-2, 1e-3, measurements, + "1/1 1/1", graph, initial); + HybridSmoother smoother; + + HybridGaussianFactorGraph linearized; + + constexpr size_t maxNrLeaves = 3; + for (size_t k = 1; k < K; k++) { + // Motion Model + graph.push_back(switching.nonlinearFactorGraph.at(k)); + // Measurement + graph.push_back(switching.nonlinearFactorGraph.at(k + K - 1)); + + initial.insert(X(k), switching.linearizationPoint.at(X(k))); + + linearized = *graph.linearize(initial); + Ordering ordering = smoother.getOrdering(linearized); + + smoother.update(linearized, maxNrLeaves, ordering); + + graph.resize(0); + } + + HybridValues delta = smoother.hybridBayesNet().optimize(); + + Values result = initial.retract(delta.continuous()); + + DiscreteValues expected_discrete; + for (size_t k = 0; k < K - 1; k++) { + expected_discrete[M(k)] = discrete_seq[k]; + } + EXPECT(assert_equal(expected_discrete, delta.discrete())); + + Values expected_continuous; + for (size_t k = 0; k < K; k++) { + expected_continuous.insert(X(k), measurements[k]); + } + EXPECT(assert_equal(expected_continuous, result)); +} + /****************************************************************************/ // Test approximate inference with an additional pruning step. TEST(HybridEstimation, ISAM) { From f0770c26cf345dab8d8e6c439ec85d3ada26d916 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 8 Oct 2024 16:14:01 +0900 Subject: [PATCH 059/204] Less regression, more API tests for s(3). --- .../tests/testHybridGaussianFactorGraph.cpp | 133 ++++++++---------- 1 file changed, 57 insertions(+), 76 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 1bed2533a..3b5a6a80c 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -170,14 +170,18 @@ TEST(HybridGaussianFactorGraph, eliminateFullSequentialSimple) { // Test API for the smallest switching network. // None of these are regression tests. 
TEST(HybridBayesNet, Switching) { + // Create switching network with two continuous variables and one discrete: + // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1;z1) ϕ(m0) const double betweenSigma = 0.3, priorSigma = 0.1; Switching s(2, betweenSigma, priorSigma); + + // Check size of linearized factor graph const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; EXPECT_LONGS_EQUAL(4, graph.size()); // Create some continuous and discrete values - VectorValues continuousValues{{X(0), Vector1(0.1)}, {X(1), Vector1(1.2)}}; - DiscreteValues modeZero{{M(0), 0}}, modeOne{{M(0), 1}}; + const VectorValues continuousValues{{X(0), Vector1(0.1)}, {X(1), Vector1(1.2)}}; + const DiscreteValues modeZero{{M(0), 0}}, modeOne{{M(0), 1}}; // Get the hybrid gaussian factor and check it is as expected auto hgf = std::dynamic_pointer_cast(graph.at(1)); @@ -195,13 +199,13 @@ TEST(HybridBayesNet, Switching) { EXPECT_DOUBLES_EQUAL(betweenModel->negLogConstant(), scalar1, 1e-9); // Check error for M(0) = 0 - HybridValues values0{continuousValues, modeZero}; + const HybridValues values0{continuousValues, modeZero}; double expectedError0 = 0; for (const auto& factor : graph) expectedError0 += factor->error(values0); EXPECT_DOUBLES_EQUAL(expectedError0, graph.error(values0), 1e-5); // Check error for M(0) = 1 - HybridValues values1{continuousValues, modeOne}; + const HybridValues values1{continuousValues, modeOne}; double expectedError1 = 0; for (const auto& factor : graph) expectedError1 += factor->error(values1); EXPECT_DOUBLES_EQUAL(expectedError1, graph.error(values1), 1e-5); @@ -209,22 +213,22 @@ TEST(HybridBayesNet, Switching) { // Check errorTree AlgebraicDecisionTree actualErrors = graph.errorTree(continuousValues); // Create expected error tree - AlgebraicDecisionTree expectedErrors(M(0), expectedError0, expectedError1); + const AlgebraicDecisionTree expectedErrors(M(0), expectedError0, expectedError1); // Check that the actual error tree matches the expected one EXPECT(assert_equal(expectedErrors, actualErrors, 1e-5)); // Check probPrime - double probPrime0 = graph.probPrime(values0); + const double probPrime0 = graph.probPrime(values0); EXPECT_DOUBLES_EQUAL(std::exp(-expectedError0), probPrime0, 1e-5); - double probPrime1 = graph.probPrime(values1); + const double probPrime1 = graph.probPrime(values1); EXPECT_DOUBLES_EQUAL(std::exp(-expectedError1), probPrime1, 1e-5); // Check discretePosterior - AlgebraicDecisionTree graphPosterior = graph.discretePosterior(continuousValues); - double sum = probPrime0 + probPrime1; - AlgebraicDecisionTree expectedPosterior(M(0), probPrime0 / sum, probPrime1 / sum); + const AlgebraicDecisionTree graphPosterior = graph.discretePosterior(continuousValues); + const double sum = probPrime0 + probPrime1; + const AlgebraicDecisionTree expectedPosterior(M(0), probPrime0 / sum, probPrime1 / sum); EXPECT(assert_equal(expectedPosterior, graphPosterior, 1e-5)); // Make the clique of factors connected to x0: @@ -246,7 +250,7 @@ TEST(HybridBayesNet, Switching) { EXPECT_DOUBLES_EQUAL((*hgf)(modeOne).second, actualScalar1, 1e-5); // Test eliminate x0 - Ordering ordering{X(0)}; + const Ordering ordering{X(0)}; auto [conditional, factor] = factors_x0.eliminate(ordering); // Check the conditional @@ -254,6 +258,7 @@ TEST(HybridBayesNet, Switching) { EXPECT(conditional->isHybrid()); auto p_x0_given_x1_m = conditional->asHybrid(); CHECK(p_x0_given_x1_m); + EXPECT(HybridGaussianConditional::CheckInvariants(*p_x0_given_x1_m, values1)); EXPECT_LONGS_EQUAL(1, p_x0_given_x1_m->nrFrontals()); // x0 
EXPECT_LONGS_EQUAL(2, p_x0_given_x1_m->nrParents()); // x1, m0 @@ -270,8 +275,8 @@ TEST(HybridBayesNet, Switching) { // Check that the conditional and remaining factor are consistent for both modes for (auto&& mode : {modeZero, modeOne}) { - auto gc = (*p_x0_given_x1_m)(mode); - auto [gf, scalar] = (*phi_x1_m)(mode); + const auto gc = (*p_x0_given_x1_m)(mode); + const auto [gf, scalar] = (*phi_x1_m)(mode); // The error of the original factors should equal the sum of errors of the conditional and // remaining factor, modulo the normalization constant of the conditional. @@ -332,12 +337,41 @@ TEST(HybridBayesNet, Switching) { } // Now test full elimination of the graph: - auto posterior = graph.eliminateSequential(); - CHECK(posterior); + auto hybridBayesNet = graph.eliminateSequential(); + CHECK(hybridBayesNet); // Check that the posterior P(M|X=continuousValues) from the Bayes net is the same as the // same posterior from the graph. This is a sanity check that the elimination is done correctly. - AlgebraicDecisionTree bnPosterior = graph.discretePosterior(continuousValues); + AlgebraicDecisionTree bnPosterior = hybridBayesNet->discretePosterior(continuousValues); + EXPECT(assert_equal(graphPosterior, bnPosterior)); +} + +/* ****************************************************************************/ +// Test subset of API for switching network with 3 states. +// None of these are regression tests. +TEST(HybridGaussianFactorGraph, ErrorAndProbPrime) { + // Create switching network with three continuous variables and two discrete: + // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1) + Switching s(3); + + // Check size of linearized factor graph + const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; + EXPECT_LONGS_EQUAL(7, graph.size()); + + // Eliminate the graph + const HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); + + const HybridValues delta = hybridBayesNet->optimize(); + const double error = graph.error(delta); + + // Check that the probability prime is the exponential of the error + EXPECT(assert_equal(graph.probPrime(delta), exp(-error), 1e-7)); + + // Check that the posterior P(M|X=continuousValues) from the Bayes net is the same as the + // same posterior from the graph. This is a sanity check that the elimination is done correctly. 
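  // Both posteriors are computed as (sketch)
  //
  //   P(m | x) = exp(-E(x, m)) / Σ_{m'} exp(-E(x, m')),
  //
  // so their agreement checks that elimination preserved E(x, m) up to an
  // m-independent constant.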
+ const AlgebraicDecisionTree graphPosterior = graph.discretePosterior(delta.continuous()); + const AlgebraicDecisionTree bnPosterior = + hybridBayesNet->discretePosterior(delta.continuous()); EXPECT(assert_equal(graphPosterior, bnPosterior)); } @@ -466,51 +500,6 @@ TEST(HybridGaussianFactorGraph, Conditionals) { EXPECT(assert_equal(expected_discrete, result.discrete())); } -/* ****************************************************************************/ -// Test hybrid gaussian factor graph error and unnormalized probabilities -TEST(HybridGaussianFactorGraph, ErrorAndProbPrime) { - Switching s(3); - - HybridGaussianFactorGraph graph = s.linearizedFactorGraph; - - HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); - - const HybridValues delta = hybridBayesNet->optimize(); - const double error = graph.error(delta); - - // regression - EXPECT(assert_equal(1.58886, error, 1e-5)); - - // Real test: - EXPECT(assert_equal(graph.probPrime(delta), exp(-error), 1e-7)); -} - -/* ****************************************************************************/ -// Test hybrid gaussian factor graph error and unnormalized probabilities -TEST(HybridGaussianFactorGraph, ErrorAndProbPrimeTree) { - // Create switching network with three continuous variables and two discrete: - // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x0;z0) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1) - Switching s(3); - - const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; - - const HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); - - const HybridValues delta = hybridBayesNet->optimize(); - - // regression test for errorTree - std::vector leaves = {2.7916153, 1.5888555, 1.7233422, 1.6191947}; - AlgebraicDecisionTree expectedErrors(s.modes, leaves); - const auto error_tree = graph.errorTree(delta.continuous()); - EXPECT(assert_equal(expectedErrors, error_tree, 1e-7)); - - // regression test for discretePosterior - const AlgebraicDecisionTree expectedPosterior( - s.modes, std::vector{0.095516068, 0.31800092, 0.27798511, 0.3084979}); - auto posterior = graph.discretePosterior(delta.continuous()); - EXPECT(assert_equal(expectedPosterior, posterior, 1e-7)); -} - /* ****************************************************************************/ // Test hybrid gaussian factor graph errorTree during incremental operation TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { @@ -528,15 +517,11 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { HybridBayesNet::shared_ptr hybridBayesNet = graph.eliminateSequential(); EXPECT_LONGS_EQUAL(5, hybridBayesNet->size()); + // Check discrete posterior at optimum HybridValues delta = hybridBayesNet->optimize(); - auto error_tree = graph.errorTree(delta.continuous()); - - std::vector discrete_keys = {m0, m1}; - std::vector leaves = {2.7916153, 1.5888555, 1.7233422, 1.6191947}; - AlgebraicDecisionTree expected_error(discrete_keys, leaves); - - // regression - EXPECT(assert_equal(expected_error, error_tree, 1e-7)); + AlgebraicDecisionTree graphPosterior = graph.discretePosterior(delta.continuous()); + AlgebraicDecisionTree bnPosterior = hybridBayesNet->discretePosterior(delta.continuous()); + EXPECT(assert_equal(graphPosterior, bnPosterior)); graph = HybridGaussianFactorGraph(); graph.push_back(*hybridBayesNet); @@ -547,13 +532,9 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) { EXPECT_LONGS_EQUAL(7, hybridBayesNet->size()); delta = hybridBayesNet->optimize(); - auto error_tree2 = graph.errorTree(delta.continuous()); - - // regression - leaves = { - 
0.50985198, 0.0097577296, 0.50009425, 0, 0.52922138, 0.029127133, 0.50985105, 0.0097567964}; - AlgebraicDecisionTree expected_error2(s.modes, leaves); - EXPECT(assert_equal(expected_error, error_tree, 1e-7)); + graphPosterior = graph.discretePosterior(delta.continuous()); + bnPosterior = hybridBayesNet->discretePosterior(delta.continuous()); + EXPECT(assert_equal(graphPosterior, bnPosterior)); } /* ****************************************************************************/ From bcd94e32a300efbcbeed1360f37a38482f653ecc Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 8 Oct 2024 16:14:56 +0900 Subject: [PATCH 060/204] Store negLogConstant[i] - negLogConstant_ --- gtsam/hybrid/HybridGaussianConditional.cpp | 33 ++++-- .../tests/testHybridGaussianConditional.cpp | 106 ++++++++---------- 2 files changed, 66 insertions(+), 73 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index ba2c34414..4147af8cb 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -33,6 +33,15 @@ namespace gtsam { /* *******************************************************************************/ +/** + * @brief Helper struct for constructing HybridGaussianConditional objects + * + * This struct contains the following fields: + * - nrFrontals: Optional size_t for number of frontal variables + * - pairs: FactorValuePairs for storing conditionals with their negLogConstant + * - conditionals: Conditionals for storing conditionals. TODO(frank): kill! + * - minNegLogConstant: minimum negLogConstant, computed here, subtracted in constructor + */ struct HybridGaussianConditional::Helper { std::optional nrFrontals; FactorValuePairs pairs; @@ -67,16 +76,12 @@ struct HybridGaussianConditional::Helper { /// Construct from tree of GaussianConditionals. 
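  /// In outline (a sketch of the loop that follows): every non-null
  /// conditional gc maps to the pair {gc, gc->negLogConstant()} while the
  /// minimum is tracked; null leaves map to {nullptr, +inf}. The enclosing
  /// constructor then subtracts the minimum, so the stored per-mode scalars
  /// are relative and non-negative.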
explicit Helper(const Conditionals& conditionals) : conditionals(conditionals), minNegLogConstant(std::numeric_limits::infinity()) { - auto func = [this](const GC::shared_ptr& c) -> GaussianFactorValuePair { - double value = 0.0; - if (c) { - if (!nrFrontals.has_value()) { - nrFrontals = c->nrFrontals(); - } - value = c->negLogConstant(); - minNegLogConstant = std::min(minNegLogConstant, value); - } - return {std::dynamic_pointer_cast(c), value}; + auto func = [this](const GC::shared_ptr& gc) -> GaussianFactorValuePair { + if (!gc) return {nullptr, std::numeric_limits::infinity()}; + if (!nrFrontals) nrFrontals = gc->nrFrontals(); + double value = gc->negLogConstant(); + minNegLogConstant = std::min(minNegLogConstant, value); + return {gc, value}; }; pairs = FactorValuePairs(conditionals, func); if (!nrFrontals.has_value()) { @@ -90,7 +95,13 @@ struct HybridGaussianConditional::Helper { /* *******************************************************************************/ HybridGaussianConditional::HybridGaussianConditional(const DiscreteKeys& discreteParents, const Helper& helper) - : BaseFactor(discreteParents, helper.pairs), + : BaseFactor( + discreteParents, + FactorValuePairs( + helper.pairs, + [&](const GaussianFactorValuePair& pair) { // subtract minNegLogConstant + return GaussianFactorValuePair{pair.first, pair.second - helper.minNegLogConstant}; + })), BaseConditional(*helper.nrFrontals), conditionals_(helper.conditionals), negLogConstant_(helper.minNegLogConstant) {} diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp index 05f7c6c61..5631ac321 100644 --- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -18,6 +18,8 @@ * @date December 2021 */ +#include +#include #include #include #include @@ -28,9 +30,6 @@ #include #include -#include "gtsam/discrete/DecisionTree.h" -#include "gtsam/discrete/DiscreteKey.h" - // Include for test suite #include @@ -52,10 +51,8 @@ namespace equal_constants { // Create a simple HybridGaussianConditional const double commonSigma = 2.0; const std::vector conditionals{ - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - commonSigma), - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - commonSigma)}; + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), commonSigma), + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), commonSigma)}; const HybridGaussianConditional hybrid_conditional(mode, conditionals); } // namespace equal_constants @@ -80,8 +77,8 @@ TEST(HybridGaussianConditional, LogProbability) { using namespace equal_constants; for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; - EXPECT_DOUBLES_EQUAL(conditionals[mode]->logProbability(vv), - hybrid_conditional.logProbability(hv), 1e-8); + EXPECT_DOUBLES_EQUAL( + conditionals[mode]->logProbability(vv), hybrid_conditional.logProbability(hv), 1e-8); } } @@ -93,8 +90,7 @@ TEST(HybridGaussianConditional, Error) { // Check result. DiscreteKeys discrete_keys{mode}; - std::vector leaves = {conditionals[0]->error(vv), - conditionals[1]->error(vv)}; + std::vector leaves = {conditionals[0]->error(vv), conditionals[1]->error(vv)}; AlgebraicDecisionTree expected(discrete_keys, leaves); EXPECT(assert_equal(expected, actual, 1e-6)); @@ -102,8 +98,7 @@ TEST(HybridGaussianConditional, Error) { // Check for non-tree version. 
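  // ("Non-tree" here means the scalar path error(HybridValues), which indexes
  // a single component, as opposed to the errorTree evaluated above; with
  // equal sigmas the per-mode constants cancel, so both reduce to the plain
  // component errors.)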
for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; - EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv), - hybrid_conditional.error(hv), 1e-8); + EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv), hybrid_conditional.error(hv), 1e-8); } } @@ -118,10 +113,8 @@ TEST(HybridGaussianConditional, Likelihood) { // Check that the hybrid conditional error and the likelihood error are the // same. - EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), - 1e-8); - EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), - 1e-8); + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), 1e-8); + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), 1e-8); // Check that likelihood error is as expected, i.e., just the errors of the // individual likelihoods, in the `equal_constants` case. @@ -135,8 +128,7 @@ TEST(HybridGaussianConditional, Likelihood) { std::vector ratio(2); for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; - ratio[mode] = - std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv); + ratio[mode] = std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv); } EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); } @@ -146,10 +138,8 @@ namespace mode_dependent_constants { // Create a HybridGaussianConditional with mode-dependent noise models. // 0 is low-noise, 1 is high-noise. const std::vector conditionals{ - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - 0.5), - GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), - 3.0)}; + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), 0.5), + GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), 3.0)}; const HybridGaussianConditional hybrid_conditional(mode, conditionals); } // namespace mode_dependent_constants @@ -181,9 +171,8 @@ TEST(HybridGaussianConditional, Error2) { // Expected error is e(X) + log(sqrt(|2πΣ|)). // We normalize log(sqrt(|2πΣ|)) with min(negLogConstant) // so it is non-negative. - std::vector leaves = { - conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant, - conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant}; + std::vector leaves = {conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant, + conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant}; AlgebraicDecisionTree expected(discrete_keys, leaves); EXPECT(assert_equal(expected, actual, 1e-6)); @@ -191,10 +180,10 @@ TEST(HybridGaussianConditional, Error2) { // Check for non-tree version. 
for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; - EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) + - conditionals[mode]->negLogConstant() - - minErrorConstant, - hybrid_conditional.error(hv), 1e-8); + EXPECT_DOUBLES_EQUAL( + conditionals[mode]->error(vv) + conditionals[mode]->negLogConstant() - minErrorConstant, + hybrid_conditional.error(hv), + 1e-8); } } @@ -209,10 +198,8 @@ TEST(HybridGaussianConditional, Likelihood2) { // Check that the hybrid conditional error and the likelihood error are as // expected, this invariant is the same as the equal noise case: - EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), - 1e-8); - EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), - 1e-8); + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), 1e-8); + EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), 1e-8); // Check the detailed JacobianFactor calculation for mode==1. { @@ -221,34 +208,18 @@ TEST(HybridGaussianConditional, Likelihood2) { const auto jf1 = std::dynamic_pointer_cast(gf1); CHECK(jf1); - // It has 2 rows, not 1! - CHECK(jf1->rows() == 2); - - // Check that the constant C1 is properly encoded in the JacobianFactor. - const double C1 = - conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant(); - const double c1 = std::sqrt(2.0 * C1); - Vector expected_unwhitened(2); - expected_unwhitened << 4.9 - 5.0, -c1; - Vector actual_unwhitened = jf1->unweighted_error(vv); - EXPECT(assert_equal(expected_unwhitened, actual_unwhitened)); - - // Make sure the noise model does not touch it. - Vector expected_whitened(2); - expected_whitened << (4.9 - 5.0) / 3.0, -c1; - Vector actual_whitened = jf1->error_vector(vv); - EXPECT(assert_equal(expected_whitened, actual_whitened)); - - // Check that the error is equal to the conditional error: - EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), jf1->error(hv1), 1e-8); + // Check that the JacobianFactor error with constants is equal to the conditional error: + EXPECT_DOUBLES_EQUAL( + hybrid_conditional.error(hv1), + jf1->error(hv1) + conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant(), + 1e-8); } // Check that the ratio of probPrime to evaluate is the same for all modes. 
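  // Sketch of why this ratio is mode-independent under the error convention
  // above: the likelihood shifts each mode's error by
  // (negLogConstant_m - negLogConstant_min), exactly the offset already in the
  // conditional's error, so the ratio works out to exp(negLogConstant_min)
  // for every mode.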
std::vector ratio(2); for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; - ratio[mode] = - std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv); + ratio[mode] = std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv); } EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8); } @@ -261,8 +232,7 @@ TEST(HybridGaussianConditional, Prune) { DiscreteKeys modes{{M(1), 2}, {M(2), 2}}; std::vector gcs; for (size_t i = 0; i < 4; i++) { - gcs.push_back( - GaussianConditional::sharedMeanAndStddev(Z(0), Vector1(i + 1), i + 1)); + gcs.push_back(GaussianConditional::sharedMeanAndStddev(Z(0), Vector1(i + 1), i + 1)); } auto empty = std::make_shared(); HybridGaussianConditional::Conditionals conditionals(modes, gcs); @@ -282,8 +252,14 @@ TEST(HybridGaussianConditional, Prune) { } } { - const std::vector potentials{0, 0, 0.5, 0, // - 0, 0, 0.5, 0}; + const std::vector potentials{0, + 0, + 0.5, + 0, // + 0, + 0, + 0.5, + 0}; const DecisionTreeFactor decisionTreeFactor(keys, potentials); const auto pruned = hgc.prune(decisionTreeFactor); @@ -292,8 +268,14 @@ TEST(HybridGaussianConditional, Prune) { EXPECT_LONGS_EQUAL(2, pruned->nrComponents()); } { - const std::vector potentials{0.2, 0, 0.3, 0, // - 0, 0, 0.5, 0}; + const std::vector potentials{0.2, + 0, + 0.3, + 0, // + 0, + 0, + 0.5, + 0}; const DecisionTreeFactor decisionTreeFactor(keys, potentials); const auto pruned = hgc.prune(decisionTreeFactor); From 0f48efb0a9d8949af1ac1c0c5058f4becba111ee Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 8 Oct 2024 16:34:50 +0900 Subject: [PATCH 061/204] asProductFactor from base class! --- gtsam/hybrid/HybridGaussianConditional.cpp | 30 ------- gtsam/hybrid/HybridGaussianConditional.h | 3 - gtsam/hybrid/tests/testHybridMotionModel.cpp | 90 ++++++++++---------- 3 files changed, 45 insertions(+), 78 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 4147af8cb..c3c1b893e 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -150,36 +150,6 @@ const HybridGaussianConditional::Conditionals& HybridGaussianConditional::condit return conditionals_; } -/* *******************************************************************************/ -HybridGaussianProductFactor HybridGaussianConditional::asProductFactor() const { - auto wrap = [this](const std::shared_ptr& gc) - -> std::pair { - // First check if conditional has not been pruned - if (gc) { - const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_; - // If there is a difference in the covariances, we need to account for - // that since the error is dependent on the mode. - if (Cgm_Kgcm > 0.0) { - // We add a constant factor which will be used when computing - // the probability of the discrete variables. - Vector c(1); - c << std::sqrt(2.0 * Cgm_Kgcm); - auto constantFactor = std::make_shared(c); - return {GaussianFactorGraph{gc, constantFactor}, Cgm_Kgcm}; - } else { - // The scalar can be zero. - // TODO(Frank): after hiding is gone, this should be only case here. - return {GaussianFactorGraph{gc}, Cgm_Kgcm}; - } - } else { - // If the conditional is pruned, return an empty GaussianFactorGraph with zero scalar sum - // TODO(Frank): Could we just return an *empty* GaussianFactorGraph? 
- return {GaussianFactorGraph{nullptr}, 0.0}; - } - }; - return {{conditionals_, wrap}}; -} - /* *******************************************************************************/ size_t HybridGaussianConditional::nrComponents() const { size_t total = 0; diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 3e6574abc..25391cb83 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -222,9 +222,6 @@ class GTSAM_EXPORT HybridGaussianConditional HybridGaussianConditional::shared_ptr prune( const DecisionTreeFactor &discreteProbs) const; - /// Convert to a DecisionTree of Gaussian factor graphs. - HybridGaussianProductFactor asProductFactor() const override; - /// @} private: diff --git a/gtsam/hybrid/tests/testHybridMotionModel.cpp b/gtsam/hybrid/tests/testHybridMotionModel.cpp index cbfdc7fe4..bcffbe628 100644 --- a/gtsam/hybrid/tests/testHybridMotionModel.cpp +++ b/gtsam/hybrid/tests/testHybridMotionModel.cpp @@ -46,25 +46,25 @@ using symbol_shorthand::Z; DiscreteKey m1(M(1), 2); -void addMeasurement(HybridBayesNet &hbn, Key z_key, Key x_key, double sigma) { +void addMeasurement(HybridBayesNet& hbn, Key z_key, Key x_key, double sigma) { auto measurement_model = noiseModel::Isotropic::Sigma(1, sigma); - hbn.emplace_shared(z_key, Vector1(0.0), I_1x1, x_key, - -I_1x1, measurement_model); + hbn.emplace_shared( + z_key, Vector1(0.0), I_1x1, x_key, -I_1x1, measurement_model); } /// Create hybrid motion model p(x1 | x0, m1) -static HybridGaussianConditional::shared_ptr CreateHybridMotionModel( - double mu0, double mu1, double sigma0, double sigma1) { +static HybridGaussianConditional::shared_ptr CreateHybridMotionModel(double mu0, + double mu1, + double sigma0, + double sigma1) { std::vector> motionModels{{Vector1(mu0), sigma0}, {Vector1(mu1), sigma1}}; - return std::make_shared(m1, X(1), I_1x1, X(0), - motionModels); + return std::make_shared(m1, X(1), I_1x1, X(0), motionModels); } /// Create two state Bayes network with 1 or two measurement models -HybridBayesNet CreateBayesNet( - const HybridGaussianConditional::shared_ptr &hybridMotionModel, - bool add_second_measurement = false) { +HybridBayesNet CreateBayesNet(const HybridGaussianConditional::shared_ptr& hybridMotionModel, + bool add_second_measurement = false) { HybridBayesNet hbn; // Add measurement model p(z0 | x0) @@ -86,15 +86,16 @@ HybridBayesNet CreateBayesNet( /// Approximate the discrete marginal P(m1) using importance sampling std::pair approximateDiscreteMarginal( - const HybridBayesNet &hbn, - const HybridGaussianConditional::shared_ptr &hybridMotionModel, - const VectorValues &given, size_t N = 100000) { + const HybridBayesNet& hbn, + const HybridGaussianConditional::shared_ptr& hybridMotionModel, + const VectorValues& given, + size_t N = 100000) { /// Create importance sampling network q(x0,x1,m) = p(x1|x0,m1) q(x0) P(m1), /// using q(x0) = N(z0, sigmaQ) to sample x0. HybridBayesNet q; q.push_back(hybridMotionModel); // Add hybrid motion model q.emplace_shared(GaussianConditional::FromMeanAndStddev( - X(0), given.at(Z(0)), /* sigmaQ = */ 3.0)); // Add proposal q(x0) for x0 + X(0), given.at(Z(0)), /* sigmaQ = */ 3.0)); // Add proposal q(x0) for x0 q.emplace_shared(m1, "50/50"); // Discrete prior. 
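// A minimal sketch of the estimator implemented next (the actual loop may
// differ in details such as RNG handling; `sum` and `rng` are illustrative
// names):
//
//   std::vector<double> sum(2, 0.0);
//   std::mt19937_64 rng(42);
//   for (size_t i = 0; i < N; i++) {
//     HybridValues s = q.sample(&rng);  // s ~ q(x0, x1, m1)
//     s.insert(given);                  // attach the measurements z
//     const double w = hbn.evaluate(s) / q.evaluate(s);  // importance weight
//     sum[s.discrete().at(M(1))] += w;
//   }
//   // sum[i] / (sum[0] + sum[1]) then estimates P(m1 = i | z).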
// Do importance sampling
@@ -192,24 +193,25 @@ TEST(HybridGaussianFactor, TwoStateModel2) {
     HybridBayesNet hbn = CreateBayesNet(hybridMotionModel);
     HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
 
-    // Check that ratio of Bayes net and factor graph for different modes is
-    // equal for several values of {x0,x1}.
-    for (VectorValues vv :
-         {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
-      vv.insert(given);  // add measurements for HBN
-      HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}});
-      EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
-                           gfg.error(hv1) / hbn.error(hv1), 1e-9);
-    }
+    HybridBayesNet::shared_ptr eliminated = gfg.eliminateSequential();
 
-    HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
+    for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+                            VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+      vv.insert(given);  // add measurements for HBN
+      const auto& expectedDiscretePosterior = hbn.discretePosterior(vv);
+
+      // Equality of posteriors asserts that the factor graph is correct (same ratios for all modes)
+      EXPECT(assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
+
+      // This one asserts that HBN resulting from elimination is correct.
+      EXPECT(assert_equal(expectedDiscretePosterior, eliminated->discretePosterior(vv)));
+    }
 
     // Importance sampling run with 100k samples gives 50.095/49.905
     // approximateDiscreteMarginal(hbn, hybridMotionModel, given);
 
     // Since no measurement on x1, we get a 50/50 probability
-    auto p_m = bn->at(2)->asDiscrete();
+    auto p_m = eliminated->at(2)->asDiscrete();
     EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()({{M(1), 0}}), 1e-9);
     EXPECT_DOUBLES_EQUAL(0.5, p_m->operator()({{M(1), 1}}), 1e-9);
   }
@@ -221,24 +223,26 @@ TEST(HybridGaussianFactor, TwoStateModel2) {
     HybridBayesNet hbn = CreateBayesNet(hybridMotionModel, true);
     HybridGaussianFactorGraph gfg = hbn.toFactorGraph(given);
+    HybridBayesNet::shared_ptr eliminated = gfg.eliminateSequential();
 
     // Check that ratio of Bayes net and factor graph for different modes is
     // equal for several values of {x0,x1}.
     for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
                             VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
       vv.insert(given);  // add measurements for HBN
-      HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}});
-      EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
-                           gfg.error(hv1) / hbn.error(hv1), 1e-9);
-    }
+      const auto& expectedDiscretePosterior = hbn.discretePosterior(vv);
 
-    HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
+      // Equality of posteriors asserts that the factor graph is correct (same ratios for all modes)
+      EXPECT(assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
+
+      // This one asserts that HBN resulting from elimination is correct.
+      EXPECT(assert_equal(expectedDiscretePosterior, eliminated->discretePosterior(vv)));
+    }
 
     // Values taken from an importance sampling run with 100k samples:
     // approximateDiscreteMarginal(hbn, hybridMotionModel, given);
     DiscreteConditional expected(m1, "48.3158/51.6842");
-    EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002));
+    EXPECT(assert_equal(expected, *(eliminated->at(2)->asDiscrete()), 0.002));
   }
 
   {
@@ -286,13 +290,11 @@ TEST(HybridGaussianFactor, TwoStateModel3) {
 
   // Check that ratio of Bayes net and factor graph for different modes is
   // equal for several values of {x0,x1}.
-  for (VectorValues vv :
-       {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-        VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+  for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+                          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
     vv.insert(given);  // add measurements for HBN
     HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}});
-    EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
-                         gfg.error(hv1) / hbn.error(hv1), 1e-9);
+    EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), gfg.error(hv1) / hbn.error(hv1), 1e-9);
   }
 
   HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
@@ -316,13 +318,11 @@ TEST(HybridGaussianFactor, TwoStateModel3) {
 
   // Check that ratio of Bayes net and factor graph for different modes is
   // equal for several values of {x0,x1}.
-  for (VectorValues vv :
-       {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-        VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+  for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+                          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
     vv.insert(given);  // add measurements for HBN
     HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}});
-    EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
-                         gfg.error(hv1) / hbn.error(hv1), 1e-9);
+    EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), gfg.error(hv1) / hbn.error(hv1), 1e-9);
   }
 
   HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();

From 524161403a3d626c41f8f797bc74b596e128354e Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Tue, 8 Oct 2024 17:09:47 +0900
Subject: [PATCH 062/204] Fixed discreteElimination

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp  |  6 +--
 gtsam/hybrid/tests/testGaussianMixture.cpp  | 54 +++++++++++----------
 gtsam/hybrid/tests/testHybridEstimation.cpp |  2 -
 3 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index c2d84fdb8..f6b3a9eb7 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -229,12 +229,12 @@ static std::pair> discret
 // In this case, compute discrete probabilities.
 // TODO(frank): What about the scalar!?
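 // [Editorial aside, not part of the patch] The hunk below resolves this TODO:
 // each leaf holds a (factor, scalar) pair, where the scalar carries per-mode
 // constants such as noise-model normalizers. Folding it into the exponent as
 // exp(-scalar - error) keeps modes with different noise models comparable.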
 auto potential = [&](const auto& pair) -> double {
-      auto [factor, _] = pair;
+      auto [factor, scalar] = pair;
       // If the factor is null, it has been pruned, hence return potential of zero
       if (!factor)
-        return 0;
+        return 0.0;
       else
-        return exp(-factor->error(kEmpty));
+        return exp(-scalar - factor->error(kEmpty));
     };
     DecisionTree<Key, double> potentials(gmf->factors(), potential);
     dfg.emplace_shared<DecisionTreeFactor>(gmf->discreteKeys(), potentials);
diff --git a/gtsam/hybrid/tests/testGaussianMixture.cpp b/gtsam/hybrid/tests/testGaussianMixture.cpp
index 3256f5686..6bd9a22a8 100644
--- a/gtsam/hybrid/tests/testGaussianMixture.cpp
+++ b/gtsam/hybrid/tests/testGaussianMixture.cpp
@@ -40,8 +40,7 @@ const DiscreteKey m(M(0), 2);
 const DiscreteValues m1Assignment{{M(0), 1}};
 
 // Define a 60/40 prior on the mode
-DiscreteConditional::shared_ptr mixing =
-    std::make_shared<DiscreteConditional>(m, "60/40");
+DiscreteConditional::shared_ptr mixing = std::make_shared<DiscreteConditional>(m, "60/40");
 
 /// Gaussian density function
 double Gaussian(double mu, double sigma, double z) {
@@ -53,24 +52,12 @@ double Gaussian(double mu, double sigma, double z) {
 * If sigma0 == sigma1, it simplifies to a sigmoid function.
 * Hardcodes 60/40 prior on mode.
 */
-double prob_m_z(double mu0, double mu1, double sigma0, double sigma1,
-                double z) {
+double prob_m_z(double mu0, double mu1, double sigma0, double sigma1, double z) {
 const double p0 = 0.6 * Gaussian(mu0, sigma0, z);
 const double p1 = 0.4 * Gaussian(mu1, sigma1, z);
 return p1 / (p0 + p1);
 };
 
-/// Given \phi(m;z)\phi(m) use eliminate to obtain P(m|z).
-DiscreteConditional SolveHFG(const HybridGaussianFactorGraph &hfg) {
- return *hfg.eliminateSequential()->at(0)->asDiscrete();
-}
-
-/// Given p(z,m) and z, convert to HFG and solve.
-DiscreteConditional SolveHBN(const HybridBayesNet &hbn, double z) {
- VectorValues given{{Z(0), Vector1(z)}};
- return SolveHFG(hbn.toFactorGraph(given));
-}
-
 /*
 * Test a Gaussian Mixture Model P(m)p(z|m) with same sigma.
 * The posterior, as a function of z, should be a sigmoid function.
@@ -81,14 +68,14 @@ TEST(GaussianMixture, GaussianMixtureModel) {
 
   // Create a Gaussian mixture model p(z|m) with same sigma.
   HybridBayesNet gmm;
-  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma},
-                                                    {Vector1(mu1), sigma}};
+  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma}, {Vector1(mu1), sigma}};
   gmm.emplace_shared<HybridGaussianConditional>(m, Z(0), parameters);
   gmm.push_back(mixing);
 
   // At the halfway point between the means, the posterior equals the 60/40 prior
   double midway = mu1 - mu0;
-  auto pMid = SolveHBN(gmm, midway);
+  auto eliminationResult = gmm.toFactorGraph({{Z(0), Vector1(midway)}}).eliminateSequential();
+  auto pMid = *eliminationResult->at(0)->asDiscrete();
   EXPECT(assert_equal(DiscreteConditional(m, "60/40"), pMid));
 
   // Everywhere else, the result should be a sigmoid.
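[Editorial note, not part of the patch series] The sigmoid claim in prob_m_z's doc comment can be verified in isolation: for sigma0 == sigma1, P(m=1|z) is a logistic function of z. Below is a minimal standalone C++ sketch with illustrative mu/sigma values (the test's actual constants are defined outside the hunks shown here); it is an editorial check, not GTSAM code.

// Standalone check: the posterior p1/(p0+p1) equals the closed-form logistic
// 1/(1 + exp(-logit)), logit = log(w1/w0) + (mu1-mu0)(z - mid)/sigma^2.
#include <cassert>
#include <cmath>

static double Gaussian(double mu, double sigma, double z) {
  static const double kSqrt2Pi = 2.5066282746310002;  // sqrt(2*pi)
  return std::exp(-0.5 * std::pow((z - mu) / sigma, 2)) / (sigma * kSqrt2Pi);
}

int main() {
  const double w0 = 0.6, w1 = 0.4;             // the 60/40 prior from the test
  const double mu0 = 1.0, mu1 = 3.0, s = 0.5;  // illustrative values only
  for (double z = -2.0; z <= 6.0; z += 0.25) {
    const double p0 = w0 * Gaussian(mu0, s, z), p1 = w1 * Gaussian(mu1, s, z);
    const double logit =
        std::log(w1 / w0) + (mu1 - mu0) * (z - 0.5 * (mu0 + mu1)) / (s * s);
    // Mixture posterior and logistic agree to machine precision:
    assert(std::fabs(p1 / (p0 + p1) - 1.0 / (1.0 + std::exp(-logit))) < 1e-12);
  }
  return 0;
}

Any C++11 compiler suffices; the normalizing constants cancel in the ratio, which is exactly why only the means, sigmas, and prior weights appear in the logit.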
@@ -97,7 +84,8 @@ TEST(GaussianMixture, GaussianMixtureModel) {
     const double expected = prob_m_z(mu0, mu1, sigma, sigma, z);
 
     // Workflow 1: convert HBN to HFG and solve
-    auto posterior1 = SolveHBN(gmm, z);
+    auto eliminationResult1 = gmm.toFactorGraph({{Z(0), Vector1(z)}}).eliminateSequential();
+    auto posterior1 = *eliminationResult1->at(0)->asDiscrete();
     EXPECT_DOUBLES_EQUAL(expected, posterior1(m1Assignment), 1e-8);
 
     // Workflow 2: directly specify HFG and solve
@@ -105,7 +93,8 @@ TEST(GaussianMixture, GaussianMixtureModel) {
     hfg1.emplace_shared<DecisionTreeFactor>(
         m, std::vector<double>{Gaussian(mu0, sigma, z), Gaussian(mu1, sigma, z)});
     hfg1.push_back(mixing);
-    auto posterior2 = SolveHFG(hfg1);
+    auto eliminationResult2 = hfg1.eliminateSequential();
+    auto posterior2 = *eliminationResult2->at(0)->asDiscrete();
     EXPECT_DOUBLES_EQUAL(expected, posterior2(m1Assignment), 1e-8);
   }
 }
@@ -120,15 +109,27 @@ TEST(GaussianMixture, GaussianMixtureModel2) {
 
   // Create a Gaussian mixture model p(z|m) with different sigmas.
   HybridBayesNet gmm;
-  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma0},
-                                                    {Vector1(mu1), sigma1}};
+  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma0}, {Vector1(mu1), sigma1}};
   gmm.emplace_shared<HybridGaussianConditional>(m, Z(0), parameters);
   gmm.push_back(mixing);
 
   // We get zMax=3.1333 by finding the maximum value of the function, at which
   // point the mode m==1 is about twice as probable as m==0.
   double zMax = 3.133;
-  auto pMax = SolveHBN(gmm, zMax);
+  const VectorValues vv{{Z(0), Vector1(zMax)}};
+  auto gfg = gmm.toFactorGraph(vv);
+
+  // Equality of posteriors asserts that the elimination is correct (same ratios for all modes)
+  const auto& expectedDiscretePosterior = gmm.discretePosterior(vv);
+  EXPECT(assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
+
+  // Eliminate the graph!
+  auto eliminationResultMax = gfg.eliminateSequential();
+
+  // Equality of posteriors asserts that the elimination is correct (same ratios for all modes)
+  EXPECT(assert_equal(expectedDiscretePosterior, eliminationResultMax->discretePosterior(vv)));
+
+  auto pMax = *eliminationResultMax->at(0)->asDiscrete();
   EXPECT(assert_equal(DiscreteConditional(m, "42/58"), pMax, 1e-4));
 
   // Everywhere else, the result should be a bell-curve-like function.
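[Editorial note, not part of the patch series] Constants like zMax = 3.133 above come from maximizing prob_m_z over z. Since the posterior here is unimodal, a golden-section search reproduces that kind of value; the sketch below is standalone and the mu/sigma numbers are placeholders, not the test's actual constants.

// Standalone sketch: golden-section search for the argmax of a unimodal f.
#include <cmath>
#include <functional>
#include <iostream>

static double argmax(const std::function<double(double)>& f, double a,
                     double b, double tol = 1e-9) {
  const double phi = 0.5 * (std::sqrt(5.0) - 1.0);  // inverse golden ratio
  while (b - a > tol) {
    const double c = b - phi * (b - a), d = a + phi * (b - a);
    if (f(c) > f(d)) b = d; else a = c;  // keep the bracket with the larger value
  }
  return 0.5 * (a + b);
}

int main() {
  // prob_m_z with a 60/40 prior; placeholder parameters for illustration.
  const double mu0 = 1.0, mu1 = 3.0, s0 = 8.0, s1 = 4.0;
  auto G = [](double mu, double s, double z) {
    return std::exp(-0.5 * std::pow((z - mu) / s, 2)) / s;  // constant dropped
  };
  auto prob = [&](double z) {
    const double p0 = 0.6 * G(mu0, s0, z), p1 = 0.4 * G(mu1, s1, z);
    return p1 / (p0 + p1);
  };
  std::cout << "zMax = " << argmax(prob, -10.0, 10.0) << std::endl;
  return 0;
}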
@@ -137,7 +138,8 @@ TEST(GaussianMixture, GaussianMixtureModel2) { const double expected = prob_m_z(mu0, mu1, sigma0, sigma1, z); // Workflow 1: convert HBN to HFG and solve - auto posterior1 = SolveHBN(gmm, z); + auto eliminationResult1 = gmm.toFactorGraph({{Z(0), Vector1(z)}}).eliminateSequential(); + auto posterior1 = *eliminationResult1->at(0)->asDiscrete(); EXPECT_DOUBLES_EQUAL(expected, posterior1(m1Assignment), 1e-8); // Workflow 2: directly specify HFG and solve @@ -145,11 +147,11 @@ TEST(GaussianMixture, GaussianMixtureModel2) { hfg.emplace_shared( m, std::vector{Gaussian(mu0, sigma0, z), Gaussian(mu1, sigma1, z)}); hfg.push_back(mixing); - auto posterior2 = SolveHFG(hfg); + auto eliminationResult2 = hfg.eliminateSequential(); + auto posterior2 = *eliminationResult2->at(0)->asDiscrete(); EXPECT_DOUBLES_EQUAL(expected, posterior2(m1Assignment), 1e-8); } } - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 88d8be0bc..b2a909ac3 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -518,8 +518,6 @@ TEST(HybridEstimation, CorrectnessViaSampling) { // the normalizing term computed via the Bayes net determinant. const HybridValues sample = bn->sample(&rng); double expected_ratio = compute_ratio(sample); - // regression - EXPECT_DOUBLES_EQUAL(0.728588, expected_ratio, 1e-6); // 3. Do sampling constexpr int num_samples = 10; From 9f7ccbb33c29a69459054ddfa0e61ca0abcd4526 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 8 Oct 2024 17:57:53 +0900 Subject: [PATCH 063/204] Minimize formatting changes --- gtsam/hybrid/HybridGaussianConditional.cpp | 154 +++++++++--------- gtsam/hybrid/HybridGaussianFactor.cpp | 56 ++++--- gtsam/hybrid/HybridGaussianFactor.h | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 149 +++++++++-------- gtsam/hybrid/HybridGaussianProductFactor.cpp | 9 +- gtsam/hybrid/HybridGaussianProductFactor.h | 30 ++-- gtsam/hybrid/HybridNonlinearFactorGraph.cpp | 6 +- gtsam/hybrid/HybridNonlinearFactorGraph.h | 4 +- gtsam/hybrid/tests/testGaussianMixture.cpp | 30 ++-- gtsam/hybrid/tests/testHybridBayesNet.cpp | 88 ++++++---- gtsam/hybrid/tests/testHybridBayesTree.cpp | 46 +++--- gtsam/hybrid/tests/testHybridEstimation.cpp | 2 - .../tests/testHybridGaussianConditional.cpp | 87 +++++----- .../tests/testHybridGaussianFactorGraph.cpp | 150 +++++++++-------- .../tests/testHybridGaussianProductFactor.cpp | 7 +- gtsam/hybrid/tests/testHybridMotionModel.cpp | 73 +++++---- .../tests/testHybridNonlinearFactorGraph.cpp | 1 - 17 files changed, 509 insertions(+), 385 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index c3c1b893e..4dcb84034 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -40,7 +40,8 @@ namespace gtsam { * - nrFrontals: Optional size_t for number of frontal variables * - pairs: FactorValuePairs for storing conditionals with their negLogConstant * - conditionals: Conditionals for storing conditionals. TODO(frank): kill! 
- * - minNegLogConstant: minimum negLogConstant, computed here, subtracted in constructor + * - minNegLogConstant: minimum negLogConstant, computed here, subtracted in + * constructor */ struct HybridGaussianConditional::Helper { std::optional nrFrontals; @@ -53,7 +54,7 @@ struct HybridGaussianConditional::Helper { /// Construct from a vector of mean and sigma pairs, plus extra args. template - explicit Helper(const DiscreteKey& mode, const P& p, Args&&... args) { + explicit Helper(const DiscreteKey &mode, const P &p, Args &&...args) { nrFrontals = 1; minNegLogConstant = std::numeric_limits::infinity(); @@ -61,8 +62,9 @@ struct HybridGaussianConditional::Helper { std::vector gcs; fvs.reserve(p.size()); gcs.reserve(p.size()); - for (auto&& [mean, sigma] : p) { - auto gaussianConditional = GC::sharedMeanAndStddev(std::forward(args)..., mean, sigma); + for (auto &&[mean, sigma] : p) { + auto gaussianConditional = + GC::sharedMeanAndStddev(std::forward(args)..., mean, sigma); double value = gaussianConditional->negLogConstant(); minNegLogConstant = std::min(minNegLogConstant, value); fvs.emplace_back(gaussianConditional, value); @@ -74,8 +76,9 @@ struct HybridGaussianConditional::Helper { } /// Construct from tree of GaussianConditionals. - explicit Helper(const Conditionals& conditionals) - : conditionals(conditionals), minNegLogConstant(std::numeric_limits::infinity()) { + explicit Helper(const Conditionals &conditionals) + : conditionals(conditionals), + minNegLogConstant(std::numeric_limits::infinity()) { auto func = [this](const GC::shared_ptr& gc) -> GaussianFactorValuePair { if (!gc) return {nullptr, std::numeric_limits::infinity()}; if (!nrFrontals) nrFrontals = gc->nrFrontals(); @@ -93,67 +96,63 @@ struct HybridGaussianConditional::Helper { }; /* *******************************************************************************/ -HybridGaussianConditional::HybridGaussianConditional(const DiscreteKeys& discreteParents, - const Helper& helper) - : BaseFactor( - discreteParents, - FactorValuePairs( - helper.pairs, - [&](const GaussianFactorValuePair& pair) { // subtract minNegLogConstant - return GaussianFactorValuePair{pair.first, pair.second - helper.minNegLogConstant}; - })), +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKeys& discreteParents, const Helper& helper) + : BaseFactor(discreteParents, + FactorValuePairs(helper.pairs, + [&](const GaussianFactorValuePair& + pair) { // subtract minNegLogConstant + return GaussianFactorValuePair{ + pair.first, + pair.second - helper.minNegLogConstant}; + })), BaseConditional(*helper.nrFrontals), conditionals_(helper.conditionals), negLogConstant_(helper.minNegLogConstant) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey& discreteParent, - const std::vector& conditionals) + const DiscreteKey &discreteParent, + const std::vector &conditionals) : HybridGaussianConditional(DiscreteKeys{discreteParent}, Conditionals({discreteParent}, conditionals)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey& discreteParent, - Key key, // - const std::vector>& parameters) + const DiscreteKey &discreteParent, Key key, // + const std::vector> ¶meters) : HybridGaussianConditional(DiscreteKeys{discreteParent}, Helper(discreteParent, parameters, key)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey& discreteParent, - Key key, // - const Matrix& A, - Key parent, - const std::vector>& parameters) - : HybridGaussianConditional(DiscreteKeys{discreteParent}, - 
Helper(discreteParent, parameters, key, A, parent)) {} + const DiscreteKey &discreteParent, Key key, // + const Matrix &A, Key parent, + const std::vector> ¶meters) + : HybridGaussianConditional( + DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key, A, parent)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKey& discreteParent, - Key key, // - const Matrix& A1, - Key parent1, - const Matrix& A2, - Key parent2, - const std::vector>& parameters) - : HybridGaussianConditional(DiscreteKeys{discreteParent}, - Helper(discreteParent, parameters, key, A1, parent1, A2, parent2)) { -} + const DiscreteKey &discreteParent, Key key, // + const Matrix &A1, Key parent1, const Matrix &A2, Key parent2, + const std::vector> ¶meters) + : HybridGaussianConditional( + DiscreteKeys{discreteParent}, + Helper(discreteParent, parameters, key, A1, parent1, A2, parent2)) {} HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKeys& discreteParents, - const HybridGaussianConditional::Conditionals& conditionals) + const DiscreteKeys &discreteParents, + const HybridGaussianConditional::Conditionals &conditionals) : HybridGaussianConditional(discreteParents, Helper(conditionals)) {} /* *******************************************************************************/ -const HybridGaussianConditional::Conditionals& HybridGaussianConditional::conditionals() const { +const HybridGaussianConditional::Conditionals & +HybridGaussianConditional::conditionals() const { return conditionals_; } /* *******************************************************************************/ size_t HybridGaussianConditional::nrComponents() const { size_t total = 0; - conditionals_.visit([&total](const GaussianFactor::shared_ptr& node) { + conditionals_.visit([&total](const GaussianFactor::shared_ptr &node) { if (node) total += 1; }); return total; @@ -161,19 +160,21 @@ size_t HybridGaussianConditional::nrComponents() const { /* *******************************************************************************/ GaussianConditional::shared_ptr HybridGaussianConditional::choose( - const DiscreteValues& discreteValues) const { - auto& ptr = conditionals_(discreteValues); + const DiscreteValues &discreteValues) const { + auto &ptr = conditionals_(discreteValues); if (!ptr) return nullptr; auto conditional = std::dynamic_pointer_cast(ptr); if (conditional) return conditional; else - throw std::logic_error("A HybridGaussianConditional unexpectedly contained a non-conditional"); + throw std::logic_error( + "A HybridGaussianConditional unexpectedly contained a non-conditional"); } /* *******************************************************************************/ -bool HybridGaussianConditional::equals(const HybridFactor& lf, double tol) const { - const This* e = dynamic_cast(&lf); +bool HybridGaussianConditional::equals(const HybridFactor &lf, + double tol) const { + const This *e = dynamic_cast(&lf); if (e == nullptr) return false; // This will return false if either conditionals_ is empty or e->conditionals_ @@ -182,26 +183,27 @@ bool HybridGaussianConditional::equals(const HybridFactor& lf, double tol) const // Check the base and the factors: return BaseFactor::equals(*e, tol) && - conditionals_.equals(e->conditionals_, [tol](const auto& f1, const auto& f2) { - return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); - }); + conditionals_.equals( + e->conditionals_, [tol](const auto &f1, const auto &f2) { + return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); + }); } /* 
*******************************************************************************/ -void HybridGaussianConditional::print(const std::string& s, const KeyFormatter& formatter) const { +void HybridGaussianConditional::print(const std::string &s, + const KeyFormatter &formatter) const { std::cout << (s.empty() ? "" : s + "\n"); BaseConditional::print("", formatter); std::cout << " Discrete Keys = "; - for (auto& dk : discreteKeys()) { + for (auto &dk : discreteKeys()) { std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), "; } std::cout << std::endl << " logNormalizationConstant: " << -negLogConstant() << std::endl << std::endl; conditionals_.print( - "", - [&](Key k) { return formatter(k); }, - [&](const GaussianConditional::shared_ptr& gf) -> std::string { + "", [&](Key k) { return formatter(k); }, + [&](const GaussianConditional::shared_ptr &gf) -> std::string { RedirectCout rd; if (gf && !gf->empty()) { gf->print("", formatter); @@ -218,19 +220,20 @@ KeyVector HybridGaussianConditional::continuousParents() const { const auto range = parents(); KeyVector continuousParentKeys(range.begin(), range.end()); // Loop over all discrete keys: - for (const auto& discreteKey : discreteKeys()) { + for (const auto &discreteKey : discreteKeys()) { const Key key = discreteKey.first; // remove that key from continuousParentKeys: - continuousParentKeys.erase( - std::remove(continuousParentKeys.begin(), continuousParentKeys.end(), key), + continuousParentKeys.erase(std::remove(continuousParentKeys.begin(), + continuousParentKeys.end(), key), continuousParentKeys.end()); } return continuousParentKeys; } /* ************************************************************************* */ -bool HybridGaussianConditional::allFrontalsGiven(const VectorValues& given) const { - for (auto&& kv : given) { +bool HybridGaussianConditional::allFrontalsGiven( + const VectorValues &given) const { + for (auto &&kv : given) { if (given.find(kv.first) == given.end()) { return false; } @@ -240,7 +243,7 @@ bool HybridGaussianConditional::allFrontalsGiven(const VectorValues& given) cons /* ************************************************************************* */ std::shared_ptr HybridGaussianConditional::likelihood( - const VectorValues& given) const { + const VectorValues &given) const { if (!allFrontalsGiven(given)) { throw std::runtime_error( "HybridGaussianConditional::likelihood: given values are missing some " @@ -251,29 +254,32 @@ std::shared_ptr HybridGaussianConditional::likelihood( const KeyVector continuousParentKeys = continuousParents(); const HybridGaussianFactor::FactorValuePairs likelihoods( conditionals_, - [&](const GaussianConditional::shared_ptr& conditional) -> GaussianFactorValuePair { + [&](const GaussianConditional::shared_ptr &conditional) + -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_; return {likelihood_m, Cgm_Kgcm}; }); - return std::make_shared(discreteParentKeys, likelihoods); + return std::make_shared(discreteParentKeys, + likelihoods); } /* ************************************************************************* */ -std::set DiscreteKeysAsSet(const DiscreteKeys& discreteKeys) { +std::set DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) { std::set s(discreteKeys.begin(), discreteKeys.end()); return s; } /* *******************************************************************************/ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( - const 
DecisionTreeFactor& discreteProbs) const { + const DecisionTreeFactor &discreteProbs) const { // Find keys in discreteProbs.keys() but not in this->keys(): std::set mine(this->keys().begin(), this->keys().end()); - std::set theirs(discreteProbs.keys().begin(), discreteProbs.keys().end()); + std::set theirs(discreteProbs.keys().begin(), + discreteProbs.keys().end()); std::vector diff; - std::set_difference( - theirs.begin(), theirs.end(), mine.begin(), mine.end(), std::back_inserter(diff)); + std::set_difference(theirs.begin(), theirs.end(), mine.begin(), mine.end(), + std::back_inserter(diff)); // Find maximum probability value for every combination of our keys. Ordering keys(diff); @@ -281,24 +287,26 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( // Check the max value for every combination of our keys. // If the max value is 0.0, we can prune the corresponding conditional. - auto pruner = - [&](const Assignment& choices, - const GaussianConditional::shared_ptr& conditional) -> GaussianConditional::shared_ptr { + auto pruner = [&](const Assignment &choices, + const GaussianConditional::shared_ptr &conditional) + -> GaussianConditional::shared_ptr { return (max->evaluate(choices) == 0.0) ? nullptr : conditional; }; auto pruned_conditionals = conditionals_.apply(pruner); - return std::make_shared(discreteKeys(), pruned_conditionals); + return std::make_shared(discreteKeys(), + pruned_conditionals); } /* *******************************************************************************/ -double HybridGaussianConditional::logProbability(const HybridValues& values) const { +double HybridGaussianConditional::logProbability( + const HybridValues &values) const { auto conditional = conditionals_(values.discrete()); return conditional->logProbability(values.continuous()); } /* *******************************************************************************/ -double HybridGaussianConditional::evaluate(const HybridValues& values) const { +double HybridGaussianConditional::evaluate(const HybridValues &values) const { auto conditional = conditionals_(values.discrete()); return conditional->evaluate(values.continuous()); } diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index 44a56910e..afc818c4a 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -50,12 +50,15 @@ struct HybridGaussianFactor::ConstructorHelper { } // Build the FactorValuePairs DecisionTree pairs = FactorValuePairs( - DecisionTree(discreteKeys, factors), [](const auto& f) { - return std::pair{f, f ? 0.0 : std::numeric_limits::infinity()}; + DecisionTree(discreteKeys, factors), + [](const auto& f) { + return std::pair{f, + f ? 
0.0 : std::numeric_limits::infinity()}; }); } - /// Constructor for a single discrete key and a vector of GaussianFactorValuePairs + /// Constructor for a single discrete key and a vector of + /// GaussianFactorValuePairs ConstructorHelper(const DiscreteKey& discreteKey, const std::vector& factorPairs) : discreteKeys({discreteKey}) { @@ -71,8 +74,10 @@ struct HybridGaussianFactor::ConstructorHelper { pairs = FactorValuePairs(discreteKeys, factorPairs); } - /// Constructor for a vector of discrete keys and a vector of GaussianFactorValuePairs - ConstructorHelper(const DiscreteKeys& discreteKeys, const FactorValuePairs& factorPairs) + /// Constructor for a vector of discrete keys and a vector of + /// GaussianFactorValuePairs + ConstructorHelper(const DiscreteKeys& discreteKeys, + const FactorValuePairs& factorPairs) : discreteKeys(discreteKeys) { // Extract continuous keys from the first non-null factor // TODO: just stop after first non-null factor @@ -88,24 +93,27 @@ struct HybridGaussianFactor::ConstructorHelper { }; /* *******************************************************************************/ -HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper& helper) - : Base(helper.continuousKeys, helper.discreteKeys), factors_(helper.pairs) {} +HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper &helper) + : Base(helper.continuousKeys, helper.discreteKeys), + factors_(helper.pairs) {} HybridGaussianFactor::HybridGaussianFactor( - const DiscreteKey& discreteKey, const std::vector& factorPairs) + const DiscreteKey &discreteKey, + const std::vector &factors) + : HybridGaussianFactor(ConstructorHelper(discreteKey, factors)) {} + +HybridGaussianFactor::HybridGaussianFactor( + const DiscreteKey &discreteKey, + const std::vector &factorPairs) : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {} -HybridGaussianFactor::HybridGaussianFactor(const DiscreteKey& discreteKey, - const std::vector& factorPairs) - : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {} - -HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys& discreteKeys, - const FactorValuePairs& factorPairs) - : HybridGaussianFactor(ConstructorHelper(discreteKeys, factorPairs)) {} +HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys &discreteKeys, + const FactorValuePairs &factors) + : HybridGaussianFactor(ConstructorHelper(discreteKeys, factors)) {} /* *******************************************************************************/ -bool HybridGaussianFactor::equals(const HybridFactor& lf, double tol) const { - const This* e = dynamic_cast(&lf); +bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const { + const This *e = dynamic_cast(&lf); if (e == nullptr) return false; // This will return false if either factors_ is empty or e->factors_ is @@ -122,7 +130,8 @@ bool HybridGaussianFactor::equals(const HybridFactor& lf, double tol) const { } /* *******************************************************************************/ -void HybridGaussianFactor::print(const std::string& s, const KeyFormatter& formatter) const { +void HybridGaussianFactor::print(const std::string &s, + const KeyFormatter &formatter) const { std::cout << (s.empty() ? 
"" : s + "\n"); HybridFactor::print("", formatter); std::cout << "{\n"; @@ -148,7 +157,8 @@ void HybridGaussianFactor::print(const std::string& s, const KeyFormatter& forma } /* *******************************************************************************/ -GaussianFactorValuePair HybridGaussianFactor::operator()(const DiscreteValues& assignment) const { +GaussianFactorValuePair HybridGaussianFactor::operator()( + const DiscreteValues &assignment) const { return factors_(assignment); } @@ -156,15 +166,17 @@ GaussianFactorValuePair HybridGaussianFactor::operator()(const DiscreteValues& a HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { // Implemented by creating a new DecisionTree where: // - The structure (keys and assignments) is preserved from factors_ - // - Each leaf converted to a GaussianFactorGraph with just the factor and its scalar. - return {{factors_, [](const auto& pair) -> std::pair { + // - Each leaf converted to a GaussianFactorGraph with just the factor and its + // scalar. + return {{factors_, + [](const auto& pair) -> std::pair { return {GaussianFactorGraph{pair.first}, pair.second}; }}}; } /* *******************************************************************************/ AlgebraicDecisionTree HybridGaussianFactor::errorTree( - const VectorValues& continuousValues) const { + const VectorValues &continuousValues) const { // functor to convert from sharedFactor to double error value. auto errorFunc = [&continuousValues](const auto& pair) { return pair.first ? pair.first->error(continuousValues) + pair.second diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index 08debcf48..48a8bf48d 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -191,4 +191,4 @@ private: template <> struct traits : public Testable {}; -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 2d90f2dea..26cdb8dd2 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -55,21 +55,23 @@ template class EliminateableFactorGraph; using std::dynamic_pointer_cast; using OrphanWrapper = BayesTreeOrphanWrapper; -using Result = std::pair, GaussianFactor::shared_ptr>; +using Result = + std::pair, GaussianFactor::shared_ptr>; using ResultTree = DecisionTree>; static const VectorValues kEmpty; /* ************************************************************************ */ // Throw a runtime exception for method specified in string s, and factor f: -static void throwRuntimeError(const std::string& s, const std::shared_ptr& f) { +static void throwRuntimeError(const std::string& s, + const std::shared_ptr& f) { auto& fr = *f; - throw std::runtime_error(s + " not implemented for factor type " + demangle(typeid(fr).name()) + - "."); + throw std::runtime_error(s + " not implemented for factor type " + + demangle(typeid(fr).name()) + "."); } /* ************************************************************************ */ -const Ordering HybridOrdering(const HybridGaussianFactorGraph& graph) { +const Ordering HybridOrdering(const HybridGaussianFactorGraph &graph) { KeySet discrete_keys = graph.discreteKeySet(); const VariableIndex index(graph); return Ordering::ColamdConstrainedLast( @@ -77,20 +79,21 @@ const Ordering HybridOrdering(const HybridGaussianFactorGraph& graph) { } /* ************************************************************************ */ -static void 
printFactor(const std::shared_ptr& factor, - const DiscreteValues& assignment, - const KeyFormatter& keyFormatter) { - if (auto hgf = std::dynamic_pointer_cast(factor)) { +static void printFactor(const std::shared_ptr &factor, + const DiscreteValues &assignment, + const KeyFormatter &keyFormatter) { + if (auto hgf = dynamic_pointer_cast(factor)) { if (assignment.empty()) hgf->print("HybridGaussianFactor:", keyFormatter); else - hgf->operator()(assignment).first->print("HybridGaussianFactor, component:", keyFormatter); - } else if (auto gf = std::dynamic_pointer_cast(factor)) { + hgf->operator()(assignment) + .first->print("HybridGaussianFactor, component:", keyFormatter); + } else if (auto gf = dynamic_pointer_cast(factor)) { factor->print("GaussianFactor:\n", keyFormatter); - } else if (auto df = std::dynamic_pointer_cast(factor)) { + } else if (auto df = dynamic_pointer_cast(factor)) { factor->print("DiscreteFactor:\n", keyFormatter); - } else if (auto hc = std::dynamic_pointer_cast(factor)) { + } else if (auto hc = dynamic_pointer_cast(factor)) { if (hc->isContinuous()) { factor->print("GaussianConditional:\n", keyFormatter); } else if (hc->isDiscrete()) { @@ -99,7 +102,9 @@ static void printFactor(const std::shared_ptr& factor, if (assignment.empty()) hc->print("HybridConditional:", keyFormatter); else - hc->asHybrid()->choose(assignment)->print("HybridConditional, component:\n", keyFormatter); + hc->asHybrid() + ->choose(assignment) + ->print("HybridConditional, component:\n", keyFormatter); } } else { factor->print("Unknown factor type\n", keyFormatter); @@ -128,15 +133,15 @@ void HybridGaussianFactorGraph::print(const std::string& s, /* ************************************************************************ */ void HybridGaussianFactorGraph::printErrors( - const HybridValues& values, - const std::string& str, - const KeyFormatter& keyFormatter, - const std::function& - printCondition) const { - std::cout << str << " size: " << size() << std::endl << std::endl; + const HybridValues &values, const std::string &str, + const KeyFormatter &keyFormatter, + const std::function + &printCondition) const { + std::cout << str << "size: " << size() << std::endl << std::endl; for (size_t i = 0; i < factors_.size(); i++) { - auto&& factor = factors_[i]; + auto &&factor = factors_[i]; if (factor == nullptr) { std::cout << "Factor " << i << ": nullptr\n"; continue; @@ -154,7 +159,8 @@ void HybridGaussianFactorGraph::printErrors( } /* ************************************************************************ */ -HybridGaussianProductFactor HybridGaussianFactorGraph::collectProductFactor() const { +HybridGaussianProductFactor HybridGaussianFactorGraph::collectProductFactor() + const { HybridGaussianProductFactor result; for (auto& f : factors_) { @@ -194,10 +200,11 @@ HybridGaussianProductFactor HybridGaussianFactorGraph::collectProductFactor() co } /* ************************************************************************ */ -static std::pair> continuousElimination( - const HybridGaussianFactorGraph& factors, const Ordering& frontalKeys) { +static std::pair> +continuousElimination(const HybridGaussianFactorGraph &factors, + const Ordering &frontalKeys) { GaussianFactorGraph gfg; - for (auto& f : factors) { + for (auto &f : factors) { if (auto gf = dynamic_pointer_cast(f)) { gfg.push_back(gf); } else if (auto orphan = dynamic_pointer_cast(f)) { @@ -217,20 +224,20 @@ static std::pair> continu } /* ************************************************************************ */ -static std::pair> 
discreteElimination( - const HybridGaussianFactorGraph& factors, const Ordering& frontalKeys) { +static std::pair> +discreteElimination(const HybridGaussianFactorGraph &factors, + const Ordering &frontalKeys) { DiscreteFactorGraph dfg; - for (auto& f : factors) { + for (auto &f : factors) { if (auto df = dynamic_pointer_cast(f)) { dfg.push_back(df); } else if (auto gmf = dynamic_pointer_cast(f)) { // Case where we have a HybridGaussianFactor with no continuous keys. // In this case, compute discrete probabilities. - // TODO(frank): What about the scalar!? auto potential = [&](const auto& pair) -> double { auto [factor, scalar] = pair; - // If the factor is null, it has been pruned, hence return potential of zero + // If factor is null, it has been pruned, hence return potential zero if (!factor) return 0.0; return exp(-scalar - factor->error(kEmpty)); }; @@ -262,9 +269,10 @@ static std::pair> discret * The residual error contains no keys, and only * depends on the discrete separator if present. */ -static std::shared_ptr createDiscreteFactor(const ResultTree& eliminationResults, - const DiscreteKeys& discreteSeparator) { - auto potential = [&](const auto& pair) -> double { +static std::shared_ptr createDiscreteFactor( + const ResultTree& eliminationResults, + const DiscreteKeys &discreteSeparator) { + auto potential = [&](const auto &pair) -> double { const auto& [conditional, factor] = pair.first; const double scalar = pair.second; if (conditional && factor) { @@ -276,7 +284,8 @@ static std::shared_ptr createDiscreteFactor(const ResultTree& eliminatio const double error = scalar + factor->error(kEmpty) - negLogK; return exp(-error); } else if (!conditional && !factor) { - // If the factor is null, it has been pruned, hence return potential of zero + // If the factor is null, it has been pruned, hence return potential of + // zero return 0.0; } else { throw std::runtime_error("createDiscreteFactor has mixed NULLs"); @@ -290,11 +299,12 @@ static std::shared_ptr createDiscreteFactor(const ResultTree& eliminatio /* *******************************************************************************/ // Create HybridGaussianFactor on the separator, taking care to correct // for conditional constants. -static std::shared_ptr createHybridGaussianFactor(const ResultTree& eliminationResults, - const DiscreteKeys& discreteSeparator) { +static std::shared_ptr createHybridGaussianFactor( + const ResultTree &eliminationResults, + const DiscreteKeys &discreteSeparator) { // Correct for the normalization constant used up by the conditional - auto correct = [&](const auto& pair) -> GaussianFactorValuePair { - const auto& [conditional, factor] = pair.first; + auto correct = [&](const auto &pair) -> GaussianFactorValuePair { + const auto &[conditional, factor] = pair.first; const double scalar = pair.second; if (conditional && factor) { const double negLogK = conditional->negLogConstant(); @@ -305,29 +315,32 @@ static std::shared_ptr createHybridGaussianFactor(const ResultTree& elim throw std::runtime_error("createHybridGaussianFactors has mixed NULLs"); } }; - DecisionTree newFactors(eliminationResults, correct); + DecisionTree newFactors(eliminationResults, + correct); return std::make_shared(discreteSeparator, newFactors); } /* *******************************************************************************/ /// Get the discrete keys from the HybridGaussianFactorGraph as DiscreteKeys. 
-static auto GetDiscreteKeys = [](const HybridGaussianFactorGraph& hfg) -> DiscreteKeys { +static auto GetDiscreteKeys = + [](const HybridGaussianFactorGraph &hfg) -> DiscreteKeys { const std::set discreteKeySet = hfg.discreteKeys(); return {discreteKeySet.begin(), discreteKeySet.end()}; }; /* *******************************************************************************/ std::pair> -HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { +HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // Since we eliminate all continuous variables first, // the discrete separator will be *all* the discrete keys. DiscreteKeys discreteSeparator = GetDiscreteKeys(*this); // Collect all the factors to create a set of Gaussian factor graphs in a - // decision tree indexed by all discrete keys involved. Just like any hybrid factor, every - // assignment also has a scalar error, in this case the sum of all errors in the graph. This error - // is assignment-specific and accounts for any difference in noise models used. + // decision tree indexed by all discrete keys involved. Just like any hybrid + // factor, every assignment also has a scalar error, in this case the sum of + // all errors in the graph. This error is assignment-specific and accounts for + // any difference in noise models used. HybridGaussianProductFactor productFactor = collectProductFactor(); // Convert factor graphs with a nullptr to an empty factor graph. @@ -337,8 +350,8 @@ HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { // This is the elimination method on the leaf nodes bool someContinuousLeft = false; - auto eliminate = - [&](const std::pair& pair) -> std::pair { + auto eliminate = [&](const std::pair& pair) + -> std::pair { const auto& [graph, scalar] = pair; if (graph.empty()) { @@ -346,7 +359,8 @@ HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { } // Expensive elimination of product factor. - auto result = EliminatePreferCholesky(graph, keys); /// <<<<<< MOST COMPUTE IS HERE + auto result = + EliminatePreferCholesky(graph, keys); /// <<<<<< MOST COMPUTE IS HERE // Record whether there any continuous variables left someContinuousLeft |= !result.second->empty(); @@ -361,15 +375,16 @@ HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { // If there are no more continuous parents we create a DiscreteFactor with the // error for each discrete choice. Otherwise, create a HybridGaussianFactor // on the separator, taking care to correct for conditional constants. - auto newFactor = someContinuousLeft - ? createHybridGaussianFactor(eliminationResults, discreteSeparator) - : createDiscreteFactor(eliminationResults, discreteSeparator); + auto newFactor = + someContinuousLeft + ? createHybridGaussianFactor(eliminationResults, discreteSeparator) + : createDiscreteFactor(eliminationResults, discreteSeparator); // Create the HybridGaussianConditional from the conditionals HybridGaussianConditional::Conditionals conditionals( eliminationResults, [](const auto& pair) { return pair.first.first; }); - auto hybridGaussian = - std::make_shared(discreteSeparator, conditionals); + auto hybridGaussian = std::make_shared( + discreteSeparator, conditionals); return {std::make_shared(hybridGaussian), newFactor}; } @@ -389,7 +404,8 @@ HybridGaussianFactorGraph::eliminate(const Ordering& keys) const { * be INCORRECT and there will be NO error raised. 
*/ std::pair> // -EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys) { +EliminateHybrid(const HybridGaussianFactorGraph &factors, + const Ordering &keys) { // NOTE: Because we are in the Conditional Gaussian regime there are only // a few cases: // 1. continuous variable, make a hybrid Gaussian conditional if there are @@ -440,7 +456,7 @@ EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys) // 3. if not, we do hybrid elimination: bool only_discrete = true, only_continuous = true; - for (auto&& factor : factors) { + for (auto &&factor : factors) { if (auto hybrid_factor = std::dynamic_pointer_cast(factor)) { if (hybrid_factor->isDiscrete()) { only_continuous = false; @@ -451,9 +467,11 @@ EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys) only_discrete = false; break; } - } else if (auto cont_factor = std::dynamic_pointer_cast(factor)) { + } else if (auto cont_factor = + std::dynamic_pointer_cast(factor)) { only_discrete = false; - } else if (auto discrete_factor = std::dynamic_pointer_cast(factor)) { + } else if (auto discrete_factor = + std::dynamic_pointer_cast(factor)) { only_continuous = false; } } @@ -474,17 +492,17 @@ EliminateHybrid(const HybridGaussianFactorGraph& factors, const Ordering& keys) /* ************************************************************************ */ AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( - const VectorValues& continuousValues) const { + const VectorValues &continuousValues) const { AlgebraicDecisionTree result(0.0); // Iterate over each factor. - for (auto& factor : factors_) { + for (auto &factor : factors_) { if (auto hf = std::dynamic_pointer_cast(factor)) { // Add errorTree for hybrid factors, includes HybridGaussianConditionals! 
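      // [Editorial aside] errorTree returns an AlgebraicDecisionTree over the
      // factor's discrete keys; operator+ on two trees sums matching leaves,
      // and adding a plain double (the continuous-only case below) raises
      // every leaf by that amount, so `result` accumulates the hybrid error.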
result = result + hf->errorTree(continuousValues); } else if (auto df = std::dynamic_pointer_cast(factor)) { // If discrete, just add its errorTree as well result = result + df->errorTree(); - } else if (auto gf = std::dynamic_pointer_cast(factor)) { + } else if (auto gf = dynamic_pointer_cast(factor)) { // For a continuous only factor, just add its error result = result + gf->error(continuousValues); } else { @@ -495,7 +513,7 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::errorTree( } /* ************************************************************************ */ -double HybridGaussianFactorGraph::probPrime(const HybridValues& values) const { +double HybridGaussianFactorGraph::probPrime(const HybridValues &values) const { double error = this->error(values); // NOTE: The 0.5 term is handled by each factor return std::exp(-error); @@ -503,7 +521,7 @@ double HybridGaussianFactorGraph::probPrime(const HybridValues& values) const { /* ************************************************************************ */ AlgebraicDecisionTree HybridGaussianFactorGraph::discretePosterior( - const VectorValues& continuousValues) const { + const VectorValues &continuousValues) const { AlgebraicDecisionTree errors = this->errorTree(continuousValues); AlgebraicDecisionTree p = errors.apply([](double error) { // NOTE: The 0.5 term is handled by each factor @@ -513,18 +531,19 @@ AlgebraicDecisionTree HybridGaussianFactorGraph::discretePosterior( } /* ************************************************************************ */ -GaussianFactorGraph HybridGaussianFactorGraph::choose(const DiscreteValues& assignment) const { +GaussianFactorGraph HybridGaussianFactorGraph::choose( + const DiscreteValues &assignment) const { GaussianFactorGraph gfg; - for (auto&& f : *this) { + for (auto &&f : *this) { if (auto gf = std::dynamic_pointer_cast(f)) { gfg.push_back(gf); } else if (auto gc = std::dynamic_pointer_cast(f)) { gfg.push_back(gf); } else if (auto hgf = std::dynamic_pointer_cast(f)) { gfg.push_back((*hgf)(assignment).first); - } else if (auto hgc = std::dynamic_pointer_cast(f)) { + } else if (auto hgc = dynamic_pointer_cast(f)) { gfg.push_back((*hgc)(assignment)); - } else if (auto hc = std::dynamic_pointer_cast(f)) { + } else if (auto hc = dynamic_pointer_cast(f)) { if (auto gc = hc->asGaussian()) gfg.push_back(gc); else if (auto hgc = hc->asHybrid()) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index 2430750d1..2e95ea8d1 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -59,7 +59,8 @@ HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( return *this; } -void HybridGaussianProductFactor::print(const std::string& s, const KeyFormatter& formatter) const { +void HybridGaussianProductFactor::print(const std::string& s, + const KeyFormatter& formatter) const { KeySet keys; auto printer = [&](const Y& y) { if (keys.empty()) keys = y.first.keys(); @@ -77,8 +78,10 @@ void HybridGaussianProductFactor::print(const std::string& s, const KeyFormatter HybridGaussianProductFactor HybridGaussianProductFactor::removeEmpty() const { auto emptyGaussian = [](const Y& y) { - bool hasNull = std::any_of( - y.first.begin(), y.first.end(), [](const GaussianFactor::shared_ptr& ptr) { return !ptr; }); + bool hasNull = + std::any_of(y.first.begin(), + y.first.end(), + [](const GaussianFactor::shared_ptr& ptr) { return !ptr; }); return hasNull ? 
Y{GaussianFactorGraph(), 0.0} : y; }; return {Base(*this, emptyGaussian)}; diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index 6d33daa1b..9c2aee74a 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -60,13 +60,16 @@ class HybridGaussianProductFactor ///@{ /// Add GaussianFactor into HybridGaussianProductFactor - HybridGaussianProductFactor operator+(const GaussianFactor::shared_ptr& factor) const; + HybridGaussianProductFactor operator+( + const GaussianFactor::shared_ptr& factor) const; /// Add HybridGaussianFactor into HybridGaussianProductFactor - HybridGaussianProductFactor operator+(const HybridGaussianFactor& factor) const; + HybridGaussianProductFactor operator+( + const HybridGaussianFactor& factor) const; /// Add-assign operator for GaussianFactor - HybridGaussianProductFactor& operator+=(const GaussianFactor::shared_ptr& factor); + HybridGaussianProductFactor& operator+=( + const GaussianFactor::shared_ptr& factor); /// Add-assign operator for HybridGaussianFactor HybridGaussianProductFactor& operator+=(const HybridGaussianFactor& factor); @@ -81,7 +84,8 @@ class HybridGaussianProductFactor * @param s Optional string to prepend * @param formatter Optional key formatter */ - void print(const std::string& s = "", const KeyFormatter& formatter = DefaultKeyFormatter) const; + void print(const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const; /** * @brief Check if this HybridGaussianProductFactor is equal to another @@ -89,9 +93,11 @@ class HybridGaussianProductFactor * @param tol Tolerance for floating point comparisons * @return true if equal, false otherwise */ - bool equals(const HybridGaussianProductFactor& other, double tol = 1e-9) const { + bool equals(const HybridGaussianProductFactor& other, + double tol = 1e-9) const { return Base::equals(other, [tol](const Y& a, const Y& b) { - return a.first.equals(b.first, tol) && std::abs(a.second - b.second) < tol; + return a.first.equals(b.first, tol) && + std::abs(a.second - b.second) < tol; }); } @@ -102,12 +108,13 @@ class HybridGaussianProductFactor /** * @brief Remove empty GaussianFactorGraphs from the decision tree - * @return A new HybridGaussianProductFactor with empty GaussianFactorGraphs removed + * @return A new HybridGaussianProductFactor with empty GaussianFactorGraphs + * removed * * If any GaussianFactorGraph in the decision tree contains a nullptr, convert - * that leaf to an empty GaussianFactorGraph with zero scalar sum. This is needed because the - * DecisionTree will otherwise create a GaussianFactorGraph with a single (null) factor, which - * doesn't register as null. + * that leaf to an empty GaussianFactorGraph with zero scalar sum. This is + * needed because the DecisionTree will otherwise create a GaussianFactorGraph + * with a single (null) factor, which doesn't register as null. 
 */
  HybridGaussianProductFactor removeEmpty() const;
 
@@ -116,6 +123,7 @@ class HybridGaussianProductFactor
 
 // Testable traits
 template <>
-struct traits<HybridGaussianProductFactor> : public Testable<HybridGaussianProductFactor> {};
+struct traits<HybridGaussianProductFactor>
+    : public Testable<HybridGaussianProductFactor> {};
 
 }  // namespace gtsam
diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp
index 56b75d15e..2f5031cf2 100644
--- a/gtsam/hybrid/HybridNonlinearFactorGraph.cpp
+++ b/gtsam/hybrid/HybridNonlinearFactorGraph.cpp
@@ -181,19 +181,19 @@ HybridGaussianFactorGraph::shared_ptr HybridNonlinearFactorGraph::linearize(
 
 /* ************************************************************************* */
 AlgebraicDecisionTree<Key> HybridNonlinearFactorGraph::errorTree(
-    const Values& continuousValues) const {
+    const Values& values) const {
   AlgebraicDecisionTree<Key> result(0.0);
 
   // Iterate over each factor.
   for (auto& factor : factors_) {
     if (auto hnf = std::dynamic_pointer_cast<HybridNonlinearFactor>(factor)) {
       // Compute factor error and add it.
-      result = result + hnf->errorTree(continuousValues);
+      result = result + hnf->errorTree(values);
 
     } else if (auto nf = std::dynamic_pointer_cast<NonlinearFactor>(factor)) {
       // If continuous only, get the (double) error
       // and add it to every leaf of the result
-      result = result + nf->error(continuousValues);
+      result = result + nf->error(values);
 
     } else if (auto df = std::dynamic_pointer_cast<DiscreteFactor>(factor)) {
       // If discrete, just add its errorTree as well
diff --git a/gtsam/hybrid/HybridNonlinearFactorGraph.h b/gtsam/hybrid/HybridNonlinearFactorGraph.h
index dd18cfa60..f79f7b452 100644
--- a/gtsam/hybrid/HybridNonlinearFactorGraph.h
+++ b/gtsam/hybrid/HybridNonlinearFactorGraph.h
@@ -98,10 +98,10 @@ class GTSAM_EXPORT HybridNonlinearFactorGraph : public HybridFactorGraph {
   *
   * @note: Gaussian and hybrid Gaussian factors are not considered!
   *
-  * @param continuousValues Manifold values at which to compute the error.
+  * @param values Manifold values at which to compute the error.
   * @return AlgebraicDecisionTree<Key>
   */
-  AlgebraicDecisionTree<Key> errorTree(const Values& continuousValues) const;
+  AlgebraicDecisionTree<Key> errorTree(const Values& values) const;
 
   /**
   * @brief Compute posterior P(M|X=x) when all continuous values X are given.
diff --git a/gtsam/hybrid/tests/testGaussianMixture.cpp b/gtsam/hybrid/tests/testGaussianMixture.cpp
index 6bd9a22a8..14bef5fbb 100644
--- a/gtsam/hybrid/tests/testGaussianMixture.cpp
+++ b/gtsam/hybrid/tests/testGaussianMixture.cpp
@@ -40,7 +40,8 @@ const DiscreteKey m(M(0), 2);
 const DiscreteValues m1Assignment{{M(0), 1}};
 
 // Define a 60/40 prior on the mode
-DiscreteConditional::shared_ptr mixing = std::make_shared<DiscreteConditional>(m, "60/40");
+DiscreteConditional::shared_ptr mixing =
+    std::make_shared<DiscreteConditional>(m, "60/40");
 
 /// Gaussian density function
 double Gaussian(double mu, double sigma, double z) {
@@ -53,24 +52,12 @@ double Gaussian(double mu, double sigma, double z) {
 * If sigma0 == sigma1, it simplifies to a sigmoid function.
 * Hardcodes 60/40 prior on mode.
 */
-double prob_m_z(double mu0, double mu1, double sigma0, double sigma1, double z) {
+double prob_m_z(double mu0, double mu1, double sigma0, double sigma1,
+                double z) {
   const double p0 = 0.6 * Gaussian(mu0, sigma0, z);
   const double p1 = 0.4 * Gaussian(mu1, sigma1, z);
   return p1 / (p0 + p1);
@@ -68,13 +70,15 @@ TEST(GaussianMixture, GaussianMixtureModel) {
 
   // Create a Gaussian mixture model p(z|m) with same sigma.
 HybridBayesNet gmm;
-  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma}, {Vector1(mu1), sigma}};
+  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma},
+                                                    {Vector1(mu1), sigma}};
   gmm.emplace_shared<HybridGaussianConditional>(m, Z(0), parameters);
   gmm.push_back(mixing);
 
   // At the halfway point between the means, the posterior equals the 60/40 prior
   double midway = mu1 - mu0;
-  auto eliminationResult = gmm.toFactorGraph({{Z(0), Vector1(midway)}}).eliminateSequential();
+  auto eliminationResult =
+      gmm.toFactorGraph({{Z(0), Vector1(midway)}}).eliminateSequential();
   auto pMid = *eliminationResult->at(0)->asDiscrete();
   EXPECT(assert_equal(DiscreteConditional(m, "60/40"), pMid));
 
@@ -84,7 +88,8 @@ TEST(GaussianMixture, GaussianMixtureModel) {
     const double expected = prob_m_z(mu0, mu1, sigma, sigma, z);
 
     // Workflow 1: convert HBN to HFG and solve
-    auto eliminationResult1 = gmm.toFactorGraph({{Z(0), Vector1(z)}}).eliminateSequential();
+    auto eliminationResult1 =
+        gmm.toFactorGraph({{Z(0), Vector1(z)}}).eliminateSequential();
     auto posterior1 = *eliminationResult1->at(0)->asDiscrete();
     EXPECT_DOUBLES_EQUAL(expected, posterior1(m1Assignment), 1e-8);
 
@@ -109,7 +114,8 @@ TEST(GaussianMixture, GaussianMixtureModel2) {
 
   // Create a Gaussian mixture model p(z|m) with different sigmas.
   HybridBayesNet gmm;
-  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma0}, {Vector1(mu1), sigma1}};
+  std::vector<std::pair<Vector, double>> parameters{{Vector1(mu0), sigma0},
+                                                    {Vector1(mu1), sigma1}};
   gmm.emplace_shared<HybridGaussianConditional>(m, Z(0), parameters);
   gmm.push_back(mixing);
 
@@ -119,15 +125,18 @@ TEST(GaussianMixture, GaussianMixtureModel2) {
   const VectorValues vv{{Z(0), Vector1(zMax)}};
   auto gfg = gmm.toFactorGraph(vv);
 
-  // Equality of posteriors asserts that the elimination is correct (same ratios for all modes)
+  // Equality of posteriors asserts that the elimination is correct (same ratios
+  // for all modes)
   const auto& expectedDiscretePosterior = gmm.discretePosterior(vv);
   EXPECT(assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
 
   // Eliminate the graph!
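  // [Editorial aside, not part of the patch] After conditioning on Z(0), the
  // factor graph involves only the mode, so sequential elimination below
  // yields a single discrete conditional P(m | z), fetched via at(0).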
auto eliminationResultMax = gfg.eliminateSequential(); - // Equality of posteriors asserts that the elimination is correct (same ratios for all modes) - EXPECT(assert_equal(expectedDiscretePosterior, eliminationResultMax->discretePosterior(vv))); + // Equality of posteriors asserts that the elimination is correct (same ratios + // for all modes) + EXPECT(assert_equal(expectedDiscretePosterior, + eliminationResultMax->discretePosterior(vv))); auto pMax = *eliminationResultMax->at(0)->asDiscrete(); EXPECT(assert_equal(DiscreteConditional(m, "42/58"), pMax, 1e-4)); @@ -138,7 +147,8 @@ TEST(GaussianMixture, GaussianMixtureModel2) { const double expected = prob_m_z(mu0, mu1, sigma0, sigma1, z); // Workflow 1: convert HBN to HFG and solve - auto eliminationResult1 = gmm.toFactorGraph({{Z(0), Vector1(z)}}).eliminateSequential(); + auto eliminationResult1 = + gmm.toFactorGraph({{Z(0), Vector1(z)}}).eliminateSequential(); auto posterior1 = *eliminationResult1->at(0)->asDiscrete(); EXPECT_DOUBLES_EQUAL(expected, posterior1(m1Assignment), 1e-8); diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 438bfd267..d4192cb71 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -168,8 +168,10 @@ TEST(HybridBayesNet, Tiny) { EXPECT(!pruned.equals(bayesNet)); // error - const double error0 = chosen0.error(vv) + gc0->negLogConstant() - px->negLogConstant() - log(0.4); - const double error1 = chosen1.error(vv) + gc1->negLogConstant() - px->negLogConstant() - log(0.6); + const double error0 = chosen0.error(vv) + gc0->negLogConstant() - + px->negLogConstant() - log(0.4); + const double error1 = chosen1.error(vv) + gc1->negLogConstant() - + px->negLogConstant() - log(0.6); // print errors: EXPECT_DOUBLES_EQUAL(error0, bayesNet.error(zero), 1e-9); EXPECT_DOUBLES_EQUAL(error1, bayesNet.error(one), 1e-9); @@ -190,7 +192,6 @@ TEST(HybridBayesNet, Tiny) { // toFactorGraph auto fg = bayesNet.toFactorGraph({{Z(0), Vector1(5.0)}}); EXPECT_LONGS_EQUAL(3, fg.size()); - GTSAM_PRINT(fg); // Create the product factor for eliminating x0: HybridGaussianFactorGraph factors_x0; @@ -204,9 +205,7 @@ TEST(HybridBayesNet, Tiny) { // Call eliminate and check scalar: auto result = factors_x0.eliminate({X(0)}); - GTSAM_PRINT(*result.first); auto df = std::dynamic_pointer_cast(result.second); - GTSAM_PRINT(df->errorTree()); // Check that the ratio of probPrime to evaluate is the same for all modes. std::vector ratio(2); @@ -228,13 +227,17 @@ TEST(HybridBayesNet, Tiny) { /* ****************************************************************************/ // Hybrid Bayes net P(X0|X1) P(X1|Asia) P(Asia). namespace different_sigmas { -const auto gc = GaussianConditional::sharedMeanAndStddev(X(0), 2 * I_1x1, X(1), Vector1(-4.0), 5.0); +const auto gc = GaussianConditional::sharedMeanAndStddev(X(0), 2 * I_1x1, X(1), + Vector1(-4.0), 5.0); -const std::vector> parms{{Vector1(5), 2.0}, {Vector1(2), 3.0}}; +const std::vector> parms{{Vector1(5), 2.0}, + {Vector1(2), 3.0}}; const auto hgc = std::make_shared(Asia, X(1), parms); const auto prior = std::make_shared(Asia, "99/1"); -auto wrap = [](const auto& c) { return std::make_shared(c); }; +auto wrap = [](const auto& c) { + return std::make_shared(c); +}; const HybridBayesNet bayesNet{wrap(gc), wrap(hgc), wrap(prior)}; // Create values at which to evaluate. 
@@ -248,8 +251,8 @@ TEST(HybridBayesNet, evaluateHybrid) {
   const double conditionalProbability = gc->evaluate(values.continuous());
   const double mixtureProbability = hgc->evaluate(values);
-  EXPECT_DOUBLES_EQUAL(
-      conditionalProbability * mixtureProbability * 0.99, bayesNet.evaluate(values), 1e-9);
+  EXPECT_DOUBLES_EQUAL(conditionalProbability * mixtureProbability * 0.99,
+                       bayesNet.evaluate(values), 1e-9);
 }
 
 /* ****************************************************************************/
@@ -271,10 +274,14 @@ TEST(HybridBayesNet, Choose) {
 
   EXPECT_LONGS_EQUAL(4, gbn.size());
 
-  EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asHybrid())(assignment), *gbn.at(0)));
-  EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asHybrid())(assignment), *gbn.at(1)));
-  EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asHybrid())(assignment), *gbn.at(2)));
-  EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asHybrid())(assignment), *gbn.at(3)));
+  EXPECT(assert_equal(*(*hybridBayesNet->at(0)->asHybrid())(assignment),
+                      *gbn.at(0)));
+  EXPECT(assert_equal(*(*hybridBayesNet->at(1)->asHybrid())(assignment),
+                      *gbn.at(1)));
+  EXPECT(assert_equal(*(*hybridBayesNet->at(2)->asHybrid())(assignment),
+                      *gbn.at(2)));
+  EXPECT(assert_equal(*(*hybridBayesNet->at(3)->asHybrid())(assignment),
+                      *gbn.at(3)));
 }
 
 /* ****************************************************************************/
@@ -311,7 +318,8 @@ TEST(HybridBayesNet, OptimizeAssignment) {
 TEST(HybridBayesNet, Optimize) {
   Switching s(4, 1.0, 0.1, {0, 1, 2, 3}, "1/1 1/1");
 
-  HybridBayesNet::shared_ptr hybridBayesNet = s.linearizedFactorGraph.eliminateSequential();
+  HybridBayesNet::shared_ptr hybridBayesNet =
+      s.linearizedFactorGraph.eliminateSequential();
 
   HybridValues delta = hybridBayesNet->optimize();
 
@@ -338,7 +346,8 @@ TEST(HybridBayesNet, Pruning) {
   // ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x0;z0) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1)
   Switching s(3);
 
-  HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential();
+  HybridBayesNet::shared_ptr posterior =
+      s.linearizedFactorGraph.eliminateSequential();
   EXPECT_LONGS_EQUAL(5, posterior->size());
 
   // Optimize
@@ -364,9 +373,12 @@ TEST(HybridBayesNet, Pruning) {
   logProbability += posterior->at(0)->asHybrid()->logProbability(hybridValues);
   logProbability += posterior->at(1)->asHybrid()->logProbability(hybridValues);
   logProbability += posterior->at(2)->asHybrid()->logProbability(hybridValues);
-  logProbability += posterior->at(3)->asDiscrete()->logProbability(hybridValues);
-  logProbability += posterior->at(4)->asDiscrete()->logProbability(hybridValues);
-  EXPECT_DOUBLES_EQUAL(logProbability, posterior->logProbability(hybridValues), 1e-9);
+  logProbability +=
+      posterior->at(3)->asDiscrete()->logProbability(hybridValues);
+  logProbability +=
+      posterior->at(4)->asDiscrete()->logProbability(hybridValues);
+  EXPECT_DOUBLES_EQUAL(logProbability, posterior->logProbability(hybridValues),
+                       1e-9);
 
   // Check agreement with discrete posterior
   // double density = exp(logProbability);
@@ -387,7 +399,8 @@ TEST(HybridBayesNet, Prune) {
   Switching s(4);
 
-  HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential();
+  HybridBayesNet::shared_ptr posterior =
+      s.linearizedFactorGraph.eliminateSequential();
   EXPECT_LONGS_EQUAL(7, posterior->size());
 
   HybridValues delta = posterior->optimize();
@@ -404,7 +417,8 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) {
   Switching s(4);
 
-  HybridBayesNet::shared_ptr posterior = s.linearizedFactorGraph.eliminateSequential();
+  HybridBayesNet::shared_ptr posterior =
+      s.linearizedFactorGraph.eliminateSequential();
   EXPECT_LONGS_EQUAL(7, posterior->size());
 
   DiscreteConditional joint;
@@ -416,7 +430,8 @@ TEST(HybridBayesNet, UpdateDiscreteConditionals) {
   auto prunedDecisionTree = joint.prune(maxNrLeaves);
 
 #ifdef GTSAM_DT_MERGING
-  EXPECT_LONGS_EQUAL(maxNrLeaves + 2 /*2 zero leaves*/, prunedDecisionTree.nrLeaves());
+  EXPECT_LONGS_EQUAL(maxNrLeaves + 2 /*2 zero leaves*/,
+                     prunedDecisionTree.nrLeaves());
 #else
   EXPECT_LONGS_EQUAL(8 /*full tree*/, prunedDecisionTree.nrLeaves());
 #endif
@@ -424,14 +439,16 @@
   // regression
   // NOTE(Frank): I had to include *three* non-zeroes here now.
   DecisionTreeFactor::ADT potentials(
-      s.modes, std::vector<double>{0, 0, 0, 0.28739288, 0, 0.43106901, 0, 0.2815381});
+      s.modes,
+      std::vector<double>{0, 0, 0, 0.28739288, 0, 0.43106901, 0, 0.2815381});
   DiscreteConditional expectedConditional(3, s.modes, potentials);
 
   // Prune!
   auto pruned = posterior->prune(maxNrLeaves);
 
   // Functor to verify values against the expectedConditional
-  auto checker = [&](const Assignment<Key>& assignment, double probability) -> double {
+  auto checker = [&](const Assignment<Key>& assignment,
+                     double probability) -> double {
     // typecast so we can use this to get probability value
     DiscreteValues choices(assignment);
     if (prunedDecisionTree(choices) == 0) {
@@ -446,7 +463,8 @@
   CHECK(pruned.at(0)->asDiscrete());
   auto pruned_discrete_conditionals = pruned.at(0)->asDiscrete();
   auto discrete_conditional_tree =
-      std::dynamic_pointer_cast<DecisionTreeFactor>(pruned_discrete_conditionals);
+      std::dynamic_pointer_cast<DecisionTreeFactor>(
+          pruned_discrete_conditionals);
 
   // The checker functor verifies the values for us.
   discrete_conditional_tree->apply(checker);
 
@@ -460,10 +478,13 @@ TEST(HybridBayesNet, Sampling) {
   auto noise_model = noiseModel::Diagonal::Sigmas(Vector1(1.0));
   nfg.emplace_shared<PriorFactor<double>>(X(0), 0.0, noise_model);
 
-  auto zero_motion = std::make_shared<BetweenFactor<double>>(X(0), X(1), 0, noise_model);
-  auto one_motion = std::make_shared<BetweenFactor<double>>(X(0), X(1), 1, noise_model);
+  auto zero_motion =
+      std::make_shared<BetweenFactor<double>>(X(0), X(1), 0, noise_model);
+  auto one_motion =
+      std::make_shared<BetweenFactor<double>>(X(0), X(1), 1, noise_model);
   nfg.emplace_shared<HybridNonlinearFactor>(
-      DiscreteKey(M(0), 2), std::vector{zero_motion, one_motion});
+      DiscreteKey(M(0), 2),
+      std::vector{zero_motion, one_motion});
 
   DiscreteKey mode(M(0), 2);
   nfg.emplace_shared<DiscreteDistribution>(mode, "1/1");
 
@@ -535,17 +556,18 @@ TEST(HybridBayesNet, ErrorTreeWithConditional) {
   hbn.emplace_shared<GaussianConditional>(x0, Vector1(0.0), I_1x1, prior_model);
 
   // Add measurement P(z0 | x0)
-  hbn.emplace_shared<GaussianConditional>(z0, Vector1(0.0), -I_1x1, x0, I_1x1, measurement_model);
+  hbn.emplace_shared<GaussianConditional>(z0, Vector1(0.0), -I_1x1, x0, I_1x1,
+                                          measurement_model);
 
   // Add hybrid motion model
   double mu = 0.0;
   double sigma0 = 1e2, sigma1 = 1e-2;
   auto model0 = noiseModel::Isotropic::Sigma(1, sigma0);
   auto model1 = noiseModel::Isotropic::Sigma(1, sigma1);
-  auto c0 =
-      make_shared<GaussianConditional>(f01, Vector1(mu), I_1x1, x1, I_1x1, x0, -I_1x1, model0),
-  c1 =
-      make_shared<GaussianConditional>(f01, Vector1(mu), I_1x1, x1, I_1x1, x0, -I_1x1, model1);
+  auto c0 = make_shared<GaussianConditional>(f01, Vector1(mu), I_1x1, x1, I_1x1,
+                                             x0, -I_1x1, model0),
+       c1 = make_shared<GaussianConditional>(f01, Vector1(mu), I_1x1, x1, I_1x1,
+                                             x0, -I_1x1, model1);
   DiscreteKey m1(M(2), 2);
   hbn.emplace_shared<HybridGaussianConditional>(m1, std::vector{c0, c1});
 
diff --git a/gtsam/hybrid/tests/testHybridBayesTree.cpp b/gtsam/hybrid/tests/testHybridBayesTree.cpp
index f31846cb3..319c6d52b 100644
--- a/gtsam/hybrid/tests/testHybridBayesTree.cpp
+++ b/gtsam/hybrid/tests/testHybridBayesTree.cpp
@@ -59,7 +59,7 @@ std::vector<GaussianFactor::shared_ptr> components(Key key) {
   return {std::make_shared<JacobianFactor>(key, I_3x3, Z_3x1),
           std::make_shared<JacobianFactor>(key, I_3x3, Vector3::Ones())};
 }
-} // namespace two
+}  // namespace two
 
 /* ************************************************************************* */
 TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) {
@@ -142,9 +142,9 @@ TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalTwoClique) {
 }
 
 /* ************************************************************************* */
-void dotPrint(const HybridGaussianFactorGraph::shared_ptr &hfg,
-              const HybridBayesTree::shared_ptr &hbt,
-              const Ordering &ordering) {
+void dotPrint(const HybridGaussianFactorGraph::shared_ptr& hfg,
+              const HybridBayesTree::shared_ptr& hbt,
+              const Ordering& ordering) {
   DotWriter dw;
   dw.positionHints['c'] = 2;
   dw.positionHints['x'] = 1;
@@ -179,13 +179,15 @@ TEST(HybridGaussianFactorGraph, Switching) {
     std::vector<int> naturalX(N);
     std::iota(naturalX.begin(), naturalX.end(), 1);
     std::vector<Key> ordX;
-    std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX),
-                   [](int x) { return X(x); });
+    std::transform(
+        naturalX.begin(), naturalX.end(), std::back_inserter(ordX), [](int x) {
+          return X(x);
+        });
 
     auto [ndX, lvls] = makeBinaryOrdering(ordX);
     std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering));
     // TODO(dellaert): this has no effect!
-    for (auto &l : lvls) {
+    for (auto& l : lvls) {
       l = -l;
     }
   }
   {
     std::vector<int> naturalC(N - 1);
     std::iota(naturalC.begin(), naturalC.end(), 1);
     std::vector<Key> ordC;
-    std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC),
-                   [](int x) { return M(x); });
+    std::transform(
+        naturalC.begin(), naturalC.end(), std::back_inserter(ordC), [](int x) {
+          return M(x);
+        });
 
     // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering));
     const auto [ndC, lvls] = makeBinaryOrdering(ordC);
@@ -233,13 +237,15 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) {
     std::vector<int> naturalX(N);
     std::iota(naturalX.begin(), naturalX.end(), 1);
     std::vector<Key> ordX;
-    std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX),
-                   [](int x) { return X(x); });
+    std::transform(
+        naturalX.begin(), naturalX.end(), std::back_inserter(ordX), [](int x) {
+          return X(x);
+        });
 
     auto [ndX, lvls] = makeBinaryOrdering(ordX);
     std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering));
     // TODO(dellaert): this has no effect!
-    for (auto &l : lvls) {
+    for (auto& l : lvls) {
       l = -l;
     }
   }
@@ -247,8 +253,10 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) {
     std::vector<int> naturalC(N - 1);
     std::iota(naturalC.begin(), naturalC.end(), 1);
     std::vector<Key> ordC;
-    std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC),
-                   [](int x) { return M(x); });
+    std::transform(
+        naturalC.begin(), naturalC.end(), std::back_inserter(ordC), [](int x) {
+          return M(x);
+        });
 
     // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering));
     const auto [ndC, lvls] = makeBinaryOrdering(ordC);
@@ -408,8 +416,7 @@ TEST(HybridBayesTree, OptimizeAssignment) {
 
   // Create ordering.
   Ordering ordering;
-  for (size_t k = 0; k < s.K; k++)
-    ordering.push_back(X(k));
+  for (size_t k = 0; k < s.K; k++) ordering.push_back(X(k));
 
   const auto [hybridBayesNet, remainingFactorGraph] =
       s.linearizedFactorGraph.eliminatePartialSequential(ordering);
@@ -451,21 +458,20 @@ TEST(HybridBayesTree, Optimize) {
 
   // Create ordering.
   Ordering ordering;
-  for (size_t k = 0; k < s.K; k++)
-    ordering.push_back(X(k));
+  for (size_t k = 0; k < s.K; k++) ordering.push_back(X(k));
 
   const auto [hybridBayesNet, remainingFactorGraph] =
       s.linearizedFactorGraph.eliminatePartialSequential(ordering);
 
   DiscreteFactorGraph dfg;
-  for (auto &&f : *remainingFactorGraph) {
+  for (auto&& f : *remainingFactorGraph) {
     auto discreteFactor = dynamic_pointer_cast<DecisionTreeFactor>(f);
     assert(discreteFactor);
     dfg.push_back(discreteFactor);
   }
 
   // Add the probabilities for each branch
-  DiscreteKeys discrete_keys = {m0, m1, m2};
+  DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}, {M(2), 2}};
   vector<double> probs = {0.012519475, 0.041280228, 0.075018647, 0.081663656,
                           0.037152205, 0.12248971,  0.07349729,  0.08};
   dfg.emplace_shared<DecisionTreeFactor>(discrete_keys, probs);
diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp
index 80812a3d8..6206e5f45 100644
--- a/gtsam/hybrid/tests/testHybridEstimation.cpp
+++ b/gtsam/hybrid/tests/testHybridEstimation.cpp
@@ -37,8 +37,6 @@
 // Include for test suite
 #include <CppUnitLite/TestHarness.h>
 
-#include
-
 #include "Switching.h"
 
 using namespace std;
diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
index 5631ac321..e29c485af 100644
--- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
@@ -51,8 +51,10 @@ namespace equal_constants {
 // Create a simple HybridGaussianConditional
 const double commonSigma = 2.0;
 const std::vector<GaussianConditional::shared_ptr> conditionals{
-    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), commonSigma),
-    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), commonSigma)};
+    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0),
+                                             commonSigma),
+    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0),
+                                             commonSigma)};
 const HybridGaussianConditional hybrid_conditional(mode, conditionals);
 }  // namespace equal_constants
 
@@ -77,8 +79,8 @@ TEST(HybridGaussianConditional, LogProbability) {
   using namespace equal_constants;
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    EXPECT_DOUBLES_EQUAL(
-        conditionals[mode]->logProbability(vv), hybrid_conditional.logProbability(hv), 1e-8);
+    EXPECT_DOUBLES_EQUAL(conditionals[mode]->logProbability(vv),
+                         hybrid_conditional.logProbability(hv), 1e-8);
   }
 }
 
@@ -90,7 +92,8 @@ TEST(HybridGaussianConditional, Error) {
   // Check result.
   DiscreteKeys discrete_keys{mode};
-  std::vector<double> leaves = {conditionals[0]->error(vv), conditionals[1]->error(vv)};
+  std::vector<double> leaves = {conditionals[0]->error(vv),
+                                conditionals[1]->error(vv)};
   AlgebraicDecisionTree<Key> expected(discrete_keys, leaves);
 
   EXPECT(assert_equal(expected, actual, 1e-6));
@@ -98,7 +101,8 @@ TEST(HybridGaussianConditional, Error) {
   // Check for non-tree version.
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv), hybrid_conditional.error(hv), 1e-8);
+    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv),
+                         hybrid_conditional.error(hv), 1e-8);
   }
 }
 
@@ -113,8 +117,10 @@ TEST(HybridGaussianConditional, Likelihood) {
 
   // Check that the hybrid conditional error and the likelihood error are the
   // same.
-  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), 1e-8);
-  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), 1e-8);
+  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0),
+                       1e-8);
+  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1),
+                       1e-8);
 
   // Check that likelihood error is as expected, i.e., just the errors of the
   // individual likelihoods, in the `equal_constants` case.
@@ -128,7 +134,8 @@ TEST(HybridGaussianConditional, Likelihood) {
   std::vector<double> ratio(2);
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    ratio[mode] = std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv);
+    ratio[mode] =
+        std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv);
   }
   EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8);
 }
@@ -138,8 +145,10 @@ namespace mode_dependent_constants {
 // Create a HybridGaussianConditional with mode-dependent noise models.
 // 0 is low-noise, 1 is high-noise.
 const std::vector<GaussianConditional::shared_ptr> conditionals{
-    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), 0.5),
-    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0), 3.0)};
+    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0),
+                                             0.5),
+    GaussianConditional::sharedMeanAndStddev(Z(0), I_1x1, X(0), Vector1(0.0),
+                                             3.0)};
 const HybridGaussianConditional hybrid_conditional(mode, conditionals);
 }  // namespace mode_dependent_constants
 
@@ -171,8 +180,9 @@ TEST(HybridGaussianConditional, Error2) {
   // Expected error is e(X) + log(sqrt(|2πΣ|)).
   // We normalize log(sqrt(|2πΣ|)) with min(negLogConstant)
   // so it is non-negative.
-  std::vector<double> leaves = {conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant,
-                                conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant};
+  std::vector<double> leaves = {
+      conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant,
+      conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant};
   AlgebraicDecisionTree<Key> expected(discrete_keys, leaves);
 
   EXPECT(assert_equal(expected, actual, 1e-6));
@@ -180,10 +190,10 @@ TEST(HybridGaussianConditional, Error2) {
   // Check for non-tree version.
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    EXPECT_DOUBLES_EQUAL(
-        conditionals[mode]->error(vv) + conditionals[mode]->negLogConstant() - minErrorConstant,
-        hybrid_conditional.error(hv),
-        1e-8);
+    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) +
+                             conditionals[mode]->negLogConstant() -
+                             minErrorConstant,
+                         hybrid_conditional.error(hv), 1e-8);
   }
 }
 
@@ -198,8 +208,10 @@ TEST(HybridGaussianConditional, Likelihood2) {
 
   // Check that the hybrid conditional error and the likelihood error are as
   // expected, this invariant is the same as the equal noise case:
-  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0), 1e-8);
-  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1), 1e-8);
+  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv0), likelihood->error(hv0),
+                       1e-8);
+  EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1), likelihood->error(hv1),
+                       1e-8);
 
   // Check the detailed JacobianFactor calculation for mode==1.
   {
@@ -208,18 +220,20 @@ TEST(HybridGaussianConditional, Likelihood2) {
     const auto jf1 = std::dynamic_pointer_cast<JacobianFactor>(gf1);
     CHECK(jf1);
 
-    // Check that the JacobianFactor error with constants is equal to the conditional error:
-    EXPECT_DOUBLES_EQUAL(
-        hybrid_conditional.error(hv1),
-        jf1->error(hv1) + conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant(),
-        1e-8);
+    // Check that the JacobianFactor error with constants is equal to the
+    // conditional error:
+    EXPECT_DOUBLES_EQUAL(hybrid_conditional.error(hv1),
+                         jf1->error(hv1) + conditionals[1]->negLogConstant() -
+                             hybrid_conditional.negLogConstant(),
+                         1e-8);
   }
 
   // Check that the ratio of probPrime to evaluate is the same for all modes.
   std::vector<double> ratio(2);
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    ratio[mode] = std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv);
+    ratio[mode] =
+        std::exp(-likelihood->error(hv)) / hybrid_conditional.evaluate(hv);
   }
   EXPECT_DOUBLES_EQUAL(ratio[0], ratio[1], 1e-8);
 }
@@ -232,7 +246,8 @@ TEST(HybridGaussianConditional, Prune) {
   DiscreteKeys modes{{M(1), 2}, {M(2), 2}};
   std::vector<GaussianConditional::shared_ptr> gcs;
   for (size_t i = 0; i < 4; i++) {
-    gcs.push_back(GaussianConditional::sharedMeanAndStddev(Z(0), Vector1(i + 1), i + 1));
+    gcs.push_back(
+        GaussianConditional::sharedMeanAndStddev(Z(0), Vector1(i + 1), i + 1));
   }
   auto empty = std::make_shared();
   HybridGaussianConditional::Conditionals conditionals(modes, gcs);
@@ -252,14 +267,8 @@ TEST(HybridGaussianConditional, Prune) {
     }
   }
   {
-    const std::vector<double> potentials{0,
-                                         0,
-                                         0.5,
-                                         0,  //
-                                         0,
-                                         0,
-                                         0.5,
-                                         0};
+    const std::vector<double> potentials{0, 0, 0.5, 0,  //
+                                         0, 0, 0.5, 0};
     const DecisionTreeFactor decisionTreeFactor(keys, potentials);
 
     const auto pruned = hgc.prune(decisionTreeFactor);
 
     EXPECT_LONGS_EQUAL(2, pruned->nrComponents());
   }
   {
-    const std::vector<double> potentials{0.2,
-                                         0,
-                                         0.3,
-                                         0,  //
-                                         0,
-                                         0,
-                                         0.5,
-                                         0};
+    const std::vector<double> potentials{0.2, 0, 0.3, 0,  //
+                                         0,   0, 0.5, 0};
     const DecisionTreeFactor decisionTreeFactor(keys, potentials);
 
     const auto pruned = hgc.prune(decisionTreeFactor);
diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
index 3b5a6a80c..9fdc1aaea 100644
--- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp
@@ -46,7 +46,6 @@
 #include "Switching.h"
 #include "TinyHybridExample.h"
-#include "gtsam/linear/GaussianFactorGraph.h"
 
 using namespace std;
 using namespace gtsam;
@@ -74,7 +73,8 @@ TEST(HybridGaussianFactorGraph, Creation) {
   HybridGaussianConditional gm(
       m0,
       {std::make_shared<GaussianConditional>(X(0), Z_3x1, I_3x3, X(1), I_3x3),
-       std::make_shared<GaussianConditional>(X(0), Vector3::Ones(), I_3x3, X(1), I_3x3)});
+       std::make_shared<GaussianConditional>(
+           X(0), Vector3::Ones(), I_3x3, X(1), I_3x3)});
   hfg.add(gm);
 
   EXPECT_LONGS_EQUAL(2, hfg.size());
@@ -118,7 +118,8 @@ TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) {
   auto factor = std::dynamic_pointer_cast<DecisionTreeFactor>(result.second);
   CHECK(factor);
   // regression test
-  EXPECT(assert_equal(DecisionTreeFactor{m1, "15.74961 15.74961"}, *factor, 1e-5));
+  EXPECT(
+      assert_equal(DecisionTreeFactor{m1, "15.74961 15.74961"}, *factor, 1e-5));
 }
 
 /* ************************************************************************* */
@@ -180,7 +181,8 @@ TEST(HybridBayesNet, Switching) {
   EXPECT_LONGS_EQUAL(4, graph.size());
 
   // Create some continuous and discrete values
-  const VectorValues continuousValues{{X(0), Vector1(0.1)}, {X(1), Vector1(1.2)}};
+  const VectorValues continuousValues{{X(0), Vector1(0.1)},
+                                      {X(1), Vector1(1.2)}};
   const DiscreteValues modeZero{{M(0), 0}}, modeOne{{M(0), 1}};
 
   // Get the hybrid gaussian factor and check it is as expected
@@ -213,7 +215,8 @@ TEST(HybridBayesNet, Switching) {
   // Check errorTree
   AlgebraicDecisionTree<Key> actualErrors = graph.errorTree(continuousValues);
   // Create expected error tree
-  const AlgebraicDecisionTree<Key> expectedErrors(M(0), expectedError0, expectedError1);
+  const AlgebraicDecisionTree<Key> expectedErrors(
+      M(0), expectedError0, expectedError1);
 
   // Check that the actual error tree matches the expected one
   EXPECT(assert_equal(expectedErrors, actualErrors, 1e-5));
@@ -226,9 +229,11 @@ TEST(HybridBayesNet, Switching) {
   EXPECT_DOUBLES_EQUAL(std::exp(-expectedError1), probPrime1, 1e-5);
 
   // Check discretePosterior
-  const AlgebraicDecisionTree<Key> graphPosterior = graph.discretePosterior(continuousValues);
+  const AlgebraicDecisionTree<Key> graphPosterior =
+      graph.discretePosterior(continuousValues);
   const double sum = probPrime0 + probPrime1;
-  const AlgebraicDecisionTree<Key> expectedPosterior(M(0), probPrime0 / sum, probPrime1 / sum);
+  const AlgebraicDecisionTree<Key> expectedPosterior(
+      M(0), probPrime0 / sum, probPrime1 / sum);
   EXPECT(assert_equal(expectedPosterior, graphPosterior, 1e-5));
 
   // Make the clique of factors connected to x0:
@@ -267,29 +272,37 @@ TEST(HybridBayesNet, Switching) {
   EXPECT(std::dynamic_pointer_cast<HybridGaussianFactor>(factor));
   auto phi_x1_m = std::dynamic_pointer_cast<HybridGaussianFactor>(factor);
   EXPECT_LONGS_EQUAL(2, phi_x1_m->keys().size());  // x1, m0
-  // Check that the scalars incorporate the negative log constant of the conditional
-  EXPECT_DOUBLES_EQUAL(
-      scalar0 - (*p_x0_given_x1_m)(modeZero)->negLogConstant(), (*phi_x1_m)(modeZero).second, 1e-9);
-  EXPECT_DOUBLES_EQUAL(
-      scalar1 - (*p_x0_given_x1_m)(modeOne)->negLogConstant(), (*phi_x1_m)(modeOne).second, 1e-9);
+  // Check that the scalars incorporate the negative log constant of the
+  // conditional
+  EXPECT_DOUBLES_EQUAL(scalar0 - (*p_x0_given_x1_m)(modeZero)->negLogConstant(),
+                       (*phi_x1_m)(modeZero).second,
+                       1e-9);
+  EXPECT_DOUBLES_EQUAL(scalar1 - (*p_x0_given_x1_m)(modeOne)->negLogConstant(),
+                       (*phi_x1_m)(modeOne).second,
+                       1e-9);
 
-  // Check that the conditional and remaining factor are consistent for both modes
+  // Check that the conditional and remaining factor are consistent for both
+  // modes
   for (auto&& mode : {modeZero, modeOne}) {
     const auto gc = (*p_x0_given_x1_m)(mode);
     const auto [gf, scalar] = (*phi_x1_m)(mode);
 
-    // The error of the original factors should equal the sum of errors of the conditional and
-    // remaining factor, modulo the normalization constant of the conditional.
+    // The error of the original factors should equal the sum of errors of the
+    // conditional and remaining factor, modulo the normalization constant of
+    // the conditional.
     double originalError = factors_x0.error({continuousValues, mode});
-    const double actualError =
-        gc->negLogConstant() + gc->error(continuousValues) + gf->error(continuousValues) + scalar;
+    const double actualError = gc->negLogConstant() +
+                               gc->error(continuousValues) +
+                               gf->error(continuousValues) + scalar;
     EXPECT_DOUBLES_EQUAL(originalError, actualError, 1e-9);
   }
 
   // Create a clique for x1
   HybridGaussianFactorGraph factors_x1;
-  factors_x1.push_back(factor);  // Use the remaining factor from previous elimination
-  factors_x1.push_back(graph.at(2));  // Add the factor for x1 from the original graph
+  factors_x1.push_back(
+      factor);  // Use the remaining factor from previous elimination
+  factors_x1.push_back(
+      graph.at(2));  // Add the factor for x1 from the original graph
 
   // Test collectProductFactor for x1 clique
   auto productFactor_x1 = factors_x1.collectProductFactor();
@@ -323,16 +336,18 @@ TEST(HybridBayesNet, Switching) {
   auto phi_x1 = std::dynamic_pointer_cast<DecisionTreeFactor>(factor_x1);
   CHECK(phi_x1);
   EXPECT_LONGS_EQUAL(1, phi_x1->keys().size());  // m0
-  // We can't really check the error of the decision tree factor phi_x1, because the continuos
-  // factor whose error(kEmpty) we need is not available..
+  // We can't really check the error of the decision tree factor phi_x1, because
+  // the continuous factor whose error(kEmpty) we need is not available.
 
-  // However, we can still check the total error for the clique factors_x1 and the elimination
-  // results are equal, modulo -again- the negative log constant of the conditional.
+  // However, we can still check the total error for the clique factors_x1 and
+  // the elimination results are equal, modulo -again- the negative log constant
+  // of the conditional.
   for (auto&& mode : {modeZero, modeOne}) {
     auto gc_x1 = (*p_x1_given_m)(mode);
     double originalError_x1 = factors_x1.error({continuousValues, mode});
-    const double actualError =
-        gc_x1->negLogConstant() + gc_x1->error(continuousValues) + phi_x1->error(mode);
+    const double actualError = gc_x1->negLogConstant() +
+                               gc_x1->error(continuousValues) +
+                               phi_x1->error(mode);
     EXPECT_DOUBLES_EQUAL(originalError_x1, actualError, 1e-9);
   }
 
@@ -340,9 +355,11 @@ TEST(HybridBayesNet, Switching) {
   auto hybridBayesNet = graph.eliminateSequential();
   CHECK(hybridBayesNet);
 
-  // Check that the posterior P(M|X=continuousValues) from the Bayes net is the same as the
-  // same posterior from the graph. This is a sanity check that the elimination is done correctly.
-  AlgebraicDecisionTree<Key> bnPosterior = hybridBayesNet->discretePosterior(continuousValues);
+  // Check that the posterior P(M|X=continuousValues) from the Bayes net is the
+  // same as the same posterior from the graph. This is a sanity check that the
+  // elimination is done correctly.
+  AlgebraicDecisionTree<Key> bnPosterior =
+      hybridBayesNet->discretePosterior(continuousValues);
   EXPECT(assert_equal(graphPosterior, bnPosterior));
 }
 
@@ -367,9 +384,11 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrime) {
   // Check that the probability prime is the exponential of the error
   EXPECT(assert_equal(graph.probPrime(delta), exp(-error), 1e-7));
 
-  // Check that the posterior P(M|X=continuousValues) from the Bayes net is the same as the
-  // same posterior from the graph. This is a sanity check that the elimination is done correctly.
-  const AlgebraicDecisionTree<Key> graphPosterior = graph.discretePosterior(delta.continuous());
+  // Check that the posterior P(M|X=continuousValues) from the Bayes net is the
+  // same as the same posterior from the graph. This is a sanity check that the
+  // elimination is done correctly.
+  const AlgebraicDecisionTree<Key> graphPosterior =
+      graph.discretePosterior(delta.continuous());
   const AlgebraicDecisionTree<Key> bnPosterior =
       hybridBayesNet->discretePosterior(delta.continuous());
   EXPECT(assert_equal(graphPosterior, bnPosterior));
@@ -491,7 +510,8 @@ TEST(HybridGaussianFactorGraph, Conditionals) {
   expected_continuous.insert(X(1), 1);
   expected_continuous.insert(X(2), 2);
   expected_continuous.insert(X(3), 4);
-  Values result_continuous = switching.linearizationPoint.retract(result.continuous());
+  Values result_continuous =
+      switching.linearizationPoint.retract(result.continuous());
   EXPECT(assert_equal(expected_continuous, result_continuous));
 
   DiscreteValues expected_discrete;
@@ -519,8 +539,10 @@ TEST(HybridGaussianFactorGraph, IncrementalErrorTree) {
 
   // Check discrete posterior at optimum
   HybridValues delta = hybridBayesNet->optimize();
-  AlgebraicDecisionTree<Key> graphPosterior = graph.discretePosterior(delta.continuous());
-  AlgebraicDecisionTree<Key> bnPosterior = hybridBayesNet->discretePosterior(delta.continuous());
+  AlgebraicDecisionTree<Key> graphPosterior =
+      graph.discretePosterior(delta.continuous());
+  AlgebraicDecisionTree<Key> bnPosterior =
+      hybridBayesNet->discretePosterior(delta.continuous());
   EXPECT(assert_equal(graphPosterior, bnPosterior));
 
   graph = HybridGaussianFactorGraph();
@@ -579,11 +601,9 @@ TEST(HybridGaussianFactorGraph, collectProductFactor) {
 /* ****************************************************************************/
 // Check that the factor graph unnormalized probability is proportional to the
 // Bayes net probability for the given measurements.
-bool ratioTest(const HybridBayesNet& bn,
-               const VectorValues& measurements,
-               const HybridGaussianFactorGraph& fg,
-               size_t num_samples = 100) {
-  auto compute_ratio = [&](HybridValues* sample) -> double {
+bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
               const HybridGaussianFactorGraph &fg, size_t num_samples = 100) {
+  auto compute_ratio = [&](HybridValues *sample) -> double {
     sample->update(measurements);  // update sample with given measurements:
     return bn.evaluate(*sample) / fg.probPrime(*sample);
   };
@@ -602,11 +622,9 @@
 /* ****************************************************************************/
 // Check that the bayes net unnormalized probability is proportional to the
 // Bayes net probability for the given measurements.
-bool ratioTest(const HybridBayesNet& bn,
-               const VectorValues& measurements,
-               const HybridBayesNet& posterior,
-               size_t num_samples = 100) {
-  auto compute_ratio = [&](HybridValues* sample) -> double {
+bool ratioTest(const HybridBayesNet &bn, const VectorValues &measurements,
+               const HybridBayesNet &posterior, size_t num_samples = 100) {
+  auto compute_ratio = [&](HybridValues *sample) -> double {
     sample->update(measurements);  // update sample with given measurements:
     return bn.evaluate(*sample) / posterior.evaluate(*sample);
   };
@@ -640,10 +658,10 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1) {
 
   // Create hybrid Gaussian factor on X(0).
   using tiny::mode;
   // regression, but mean checked to be 5.0 in both cases:
-  const auto conditional0 =
-      std::make_shared<GaussianConditional>(X(0), Vector1(14.1421), I_1x1 * 2.82843),
-  conditional1 =
-      std::make_shared<GaussianConditional>(X(0), Vector1(10.1379), I_1x1 * 2.02759);
+  const auto conditional0 = std::make_shared<GaussianConditional>(
+                 X(0), Vector1(14.1421), I_1x1 * 2.82843),
+             conditional1 = std::make_shared<GaussianConditional>(
+                 X(0), Vector1(10.1379), I_1x1 * 2.02759);
   expectedBayesNet.emplace_shared<HybridGaussianConditional>(
       mode, std::vector{conditional0, conditional1});
 
@@ -671,7 +689,8 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1Swapped) {
   bn.emplace_shared<HybridGaussianConditional>(m1, Z(0), I_1x1, X(0), parms);
 
   // Create prior on X(0).
-  bn.push_back(GaussianConditional::sharedMeanAndStddev(X(0), Vector1(5.0), 0.5));
+  bn.push_back(
+      GaussianConditional::sharedMeanAndStddev(X(0), Vector1(5.0), 0.5));
 
   // Add prior on m1.
   bn.emplace_shared<DiscreteConditional>(m1, "1/1");
 
@@ -689,10 +708,10 @@ TEST(HybridGaussianFactorGraph, EliminateTiny1Swapped) {
   // Create hybrid Gaussian factor on X(0).
   // regression, but mean checked to be 5.0 in both cases:
-  const auto conditional0 =
-      std::make_shared<GaussianConditional>(X(0), Vector1(10.1379), I_1x1 * 2.02759),
-  conditional1 =
-      std::make_shared<GaussianConditional>(X(0), Vector1(14.1421), I_1x1 * 2.82843);
+  const auto conditional0 = std::make_shared<GaussianConditional>(
+                 X(0), Vector1(10.1379), I_1x1 * 2.02759),
+             conditional1 = std::make_shared<GaussianConditional>(
+                 X(0), Vector1(14.1421), I_1x1 * 2.82843);
   expectedBayesNet.emplace_shared<HybridGaussianConditional>(
       m1, std::vector{conditional0, conditional1});
 
@@ -725,10 +744,10 @@ TEST(HybridGaussianFactorGraph, EliminateTiny2) {
   // Create hybrid Gaussian factor on X(0).
   using tiny::mode;
   // regression, but mean checked to be 5.0 in both cases:
-  const auto conditional0 =
-      std::make_shared<GaussianConditional>(X(0), Vector1(17.3205), I_1x1 * 3.4641),
-  conditional1 =
-      std::make_shared<GaussianConditional>(X(0), Vector1(10.274), I_1x1 * 2.0548);
+  const auto conditional0 = std::make_shared<GaussianConditional>(
+                 X(0), Vector1(17.3205), I_1x1 * 3.4641),
+             conditional1 = std::make_shared<GaussianConditional>(
+                 X(0), Vector1(10.274), I_1x1 * 2.0548);
   expectedBayesNet.emplace_shared<HybridGaussianConditional>(
       mode, std::vector{conditional0, conditional1});
 
@@ -772,25 +791,27 @@ TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) {
   // NOTE: we add reverse topological so we can sample from the Bayes net.:
 
   // Add measurements:
-  std::vector<std::pair<Vector, double>> measurementModels{{Z_1x1, 3}, {Z_1x1, 0.5}};
+  std::vector<std::pair<Vector, double>> measurementModels{{Z_1x1, 3},
+                                                           {Z_1x1, 0.5}};
   for (size_t t : {0, 1, 2}) {
     // Create hybrid Gaussian factor on Z(t) conditioned on X(t) and mode N(t):
     const auto noise_mode_t = DiscreteKey{N(t), 2};
-    bn.emplace_shared<HybridGaussianConditional>(
-        noise_mode_t, Z(t), I_1x1, X(t), measurementModels);
+    bn.emplace_shared<HybridGaussianConditional>(noise_mode_t, Z(t), I_1x1,
+                                                 X(t), measurementModels);
 
     // Create prior on discrete mode N(t):
     bn.emplace_shared<DiscreteConditional>(noise_mode_t, "20/80");
   }
 
   // Add motion models. TODO(frank): why are they exactly the same?
-  std::vector<std::pair<Vector, double>> motionModels{{Z_1x1, 0.2}, {Z_1x1, 0.2}};
+  std::vector<std::pair<Vector, double>> motionModels{{Z_1x1, 0.2},
+                                                      {Z_1x1, 0.2}};
   for (size_t t : {2, 1}) {
     // Create hybrid Gaussian factor on X(t) conditioned on X(t-1)
     // and mode M(t-1):
     const auto motion_model_t = DiscreteKey{M(t), 2};
-    bn.emplace_shared<HybridGaussianConditional>(
-        motion_model_t, X(t), I_1x1, X(t - 1), motionModels);
+    bn.emplace_shared<HybridGaussianConditional>(motion_model_t, X(t), I_1x1,
+                                                 X(t - 1), motionModels);
 
     // Create prior on motion model M(t):
     bn.emplace_shared<DiscreteConditional>(motion_model_t, "40/60");
@@ -803,7 +824,8 @@ TEST(HybridGaussianFactorGraph, EliminateSwitchingNetwork) {
   EXPECT_LONGS_EQUAL(6, bn.sample().continuous().size());
 
   // Create measurements consistent with moving right every time:
-  const VectorValues measurements{{Z(0), Vector1(0.0)}, {Z(1), Vector1(1.0)}, {Z(2), Vector1(2.0)}};
+  const VectorValues measurements{
+      {Z(0), Vector1(0.0)}, {Z(1), Vector1(1.0)}, {Z(2), Vector1(2.0)}};
   const HybridGaussianFactorGraph fg = bn.toFactorGraph(measurements);
 
   // Factor graph is:
diff --git a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
index 017df14a5..f41c5f0aa 100644
--- a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp
@@ -54,12 +54,15 @@
 const auto f22 = std::make_shared<JacobianFactor>(X(1), A1, X(3), A3, b);
 const HybridGaussianFactor hybridFactorB(m2, {{f20, 20}, {f21, 21}, {f22, 22}});
 
 // Simulate a pruned hybrid factor, in this case m2==1 is nulled out.
-const HybridGaussianFactor prunedFactorB(m2, {{f20, 20}, {nullptr, 1000}, {f22, 22}});
+const HybridGaussianFactor prunedFactorB(
+    m2, {{f20, 20}, {nullptr, 1000}, {f22, 22}});
 }  // namespace examples
 
 /* ************************************************************************* */
 // Constructor
-TEST(HybridGaussianProductFactor, Construct) { HybridGaussianProductFactor product; }
+TEST(HybridGaussianProductFactor, Construct) {
+  HybridGaussianProductFactor product;
+}
 
 /* ************************************************************************* */
 // Add two Gaussian factors and check only one leaf in tree
diff --git a/gtsam/hybrid/tests/testHybridMotionModel.cpp b/gtsam/hybrid/tests/testHybridMotionModel.cpp
index bcffbe628..16a2bd476 100644
--- a/gtsam/hybrid/tests/testHybridMotionModel.cpp
+++ b/gtsam/hybrid/tests/testHybridMotionModel.cpp
@@ -46,25 +46,25 @@ using symbol_shorthand::Z;
 
 DiscreteKey m1(M(1), 2);
 
-void addMeasurement(HybridBayesNet& hbn, Key z_key, Key x_key, double sigma) {
+void addMeasurement(HybridBayesNet &hbn, Key z_key, Key x_key, double sigma) {
   auto measurement_model = noiseModel::Isotropic::Sigma(1, sigma);
-  hbn.emplace_shared<GaussianConditional>(
-      z_key, Vector1(0.0), I_1x1, x_key, -I_1x1, measurement_model);
+  hbn.emplace_shared<GaussianConditional>(z_key, Vector1(0.0), I_1x1, x_key,
+                                          -I_1x1, measurement_model);
 }
 
 /// Create hybrid motion model p(x1 | x0, m1)
-static HybridGaussianConditional::shared_ptr CreateHybridMotionModel(double mu0,
-                                                                     double mu1,
-                                                                     double sigma0,
-                                                                     double sigma1) {
+static HybridGaussianConditional::shared_ptr CreateHybridMotionModel(
+    double mu0, double mu1, double sigma0, double sigma1) {
   std::vector<std::pair<Vector, double>> motionModels{{Vector1(mu0), sigma0},
                                                       {Vector1(mu1), sigma1}};
-  return std::make_shared<HybridGaussianConditional>(m1, X(1), I_1x1, X(0), motionModels);
+  return std::make_shared<HybridGaussianConditional>(m1, X(1), I_1x1, X(0),
+                                                     motionModels);
 }
 
 /// Create two state Bayes network with 1 or two measurement models
-HybridBayesNet CreateBayesNet(const HybridGaussianConditional::shared_ptr& hybridMotionModel,
-                              bool add_second_measurement = false) {
+HybridBayesNet CreateBayesNet(
+    const HybridGaussianConditional::shared_ptr &hybridMotionModel,
+    bool add_second_measurement = false) {
   HybridBayesNet hbn;
 
   // Add measurement model p(z0 | x0)
@@ -86,16 +86,15 @@ HybridBayesNet CreateBayesNet(
 
 /// Approximate the discrete marginal P(m1) using importance sampling
 std::pair<double, double> approximateDiscreteMarginal(
-    const HybridBayesNet& hbn,
-    const HybridGaussianConditional::shared_ptr& hybridMotionModel,
-    const VectorValues& given,
-    size_t N = 100000) {
+    const HybridBayesNet &hbn,
+    const HybridGaussianConditional::shared_ptr &hybridMotionModel,
+    const VectorValues &given, size_t N = 100000) {
   /// Create importance sampling network q(x0,x1,m) = p(x1|x0,m1) q(x0) P(m1),
   /// using q(x0) = N(z0, sigmaQ) to sample x0.
   HybridBayesNet q;
   q.push_back(hybridMotionModel);  // Add hybrid motion model
   q.emplace_shared<GaussianConditional>(GaussianConditional::FromMeanAndStddev(
-      X(0), given.at(Z(0)), /* sigmaQ = */ 3.0));  // Add proposal q(x0) for x0
+      X(0), given.at(Z(0)), /* sigmaQ = */ 3.0));  // Add proposal q(x0) for x0
   q.emplace_shared<DiscreteConditional>(m1, "50/50");  // Discrete prior.
 
   // Do importance sampling
@@ -195,16 +194,20 @@ TEST(HybridGaussianFactor, TwoStateModel2) {
 
     HybridBayesNet::shared_ptr eliminated = gfg.eliminateSequential();
 
-    for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-                            VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+    for (VectorValues vv :
+         {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
       vv.insert(given);  // add measurements for HBN
 
       const auto& expectedDiscretePosterior = hbn.discretePosterior(vv);
 
-      // Equality of posteriors asserts that the factor graph is correct (same ratios for all modes)
-      EXPECT(assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
+      // Equality of posteriors asserts that the factor graph is correct (same
+      // ratios for all modes)
+      EXPECT(
+          assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
 
       // This one asserts that HBN resulting from elimination is correct.
-      EXPECT(assert_equal(expectedDiscretePosterior, eliminated->discretePosterior(vv)));
+      EXPECT(assert_equal(expectedDiscretePosterior,
+                          eliminated->discretePosterior(vv)));
    }
 
    // Importance sampling run with 100k samples gives 50.095/49.905
@@ -227,16 +230,20 @@ TEST(HybridGaussianFactor, TwoStateModel2) {
 
     // Check that ratio of Bayes net and factor graph for different modes is
     // equal for several values of {x0,x1}.
-    for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-                            VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+    for (VectorValues vv :
+         {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
      vv.insert(given);  // add measurements for HBN
 
      const auto& expectedDiscretePosterior = hbn.discretePosterior(vv);
 
-      // Equality of posteriors asserts that the factor graph is correct (same ratios for all modes)
-      EXPECT(assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
+      // Equality of posteriors asserts that the factor graph is correct (same
+      // ratios for all modes)
+      EXPECT(
+          assert_equal(expectedDiscretePosterior, gfg.discretePosterior(vv)));
 
      // This one asserts that HBN resulting from elimination is correct.
-      EXPECT(assert_equal(expectedDiscretePosterior, eliminated->discretePosterior(vv)));
+      EXPECT(assert_equal(expectedDiscretePosterior,
+                          eliminated->discretePosterior(vv)));
     }
 
     // Values taken from an importance sampling run with 100k samples:
@@ -290,11 +297,13 @@ TEST(HybridGaussianFactor, TwoStateModel3) {
 
     // Check that ratio of Bayes net and factor graph for different modes is
     // equal for several values of {x0,x1}.
-    for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-                            VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+    for (VectorValues vv :
+         {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
      vv.insert(given);  // add measurements for HBN
      HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}});
-      EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), gfg.error(hv1) / hbn.error(hv1), 1e-9);
+      EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
+                           gfg.error(hv1) / hbn.error(hv1), 1e-9);
     }
 
     HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
@@ -318,11 +327,13 @@ TEST(HybridGaussianFactor, TwoStateModel3) {
 
     // Check that ratio of Bayes net and factor graph for different modes is
     // equal for several values of {x0,x1}.
-    for (VectorValues vv : {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
-                            VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
+    for (VectorValues vv :
+         {VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(1.0)}},
+          VectorValues{{X(0), Vector1(0.5)}, {X(1), Vector1(3.0)}}}) {
      vv.insert(given);  // add measurements for HBN
      HybridValues hv0(vv, {{M(1), 0}}), hv1(vv, {{M(1), 1}});
-      EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0), gfg.error(hv1) / hbn.error(hv1), 1e-9);
+      EXPECT_DOUBLES_EQUAL(gfg.error(hv0) / hbn.error(hv0),
+                           gfg.error(hv1) / hbn.error(hv1), 1e-9);
     }
 
     HybridBayesNet::shared_ptr bn = gfg.eliminateSequential();
diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
index 5f939e589..b5486c6cd 100644
--- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
+++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp
@@ -34,7 +34,6 @@
 #include
 
 #include "Switching.h"
-#include "Test.h"
 
 // Include for test suite
 #include <CppUnitLite/TestHarness.h>

From a3087ffff791cfc68e16a105c3ce6608d34320ba Mon Sep 17 00:00:00 2001
From: Dan McGann
Date: Tue, 8 Oct 2024 09:57:39 -0400
Subject: [PATCH 064/204] Fix machine precision bug in DogLeg compute blend

This commit fixes a bug, documented in Issue #1861, that could cause
ComputeBlend to return the incorrect solution.
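For intuition: ComputeBlend solves ||u + tau*(n - u)|| = Delta for tau via the
quadratic formula, and with exact arithmetic exactly one root lies in [0, 1].
In floating point, however, when Delta coincides with ||n|| or ||u|| the valid
root can land at 1 + eps or -eps, and a strict bounds check rejects it. Below
is a minimal standalone 1-D sketch of the widened check (not GTSAM code; the
helper name computeTau and the scalar setup are hypothetical, for illustration
only):

  #include <cmath>
  #include <iostream>
  #include <limits>

  // Solve ||u + tau*(n - u)|| == delta for tau; 1-D scalars for simplicity.
  double computeTau(double delta, double u, double n) {
    const double d = n - u;
    const double a = d * d;
    const double b = 2.0 * u * d;
    const double c = u * u - delta * delta;
    const double sqrtDisc = std::sqrt(b * b - 4.0 * a * c);
    const double tau1 = (-b + sqrtDisc) / (2.0 * a);
    const double tau2 = (-b - sqrtDisc) / (2.0 * a);
    // Widen [0, 1] by machine epsilon so a root pushed just outside the
    // interval by round-off is still accepted, as in the patch below.
    const double eps = std::numeric_limits<double>::epsilon();
    return (-eps <= tau1 && tau1 <= 1.0 + eps) ? tau1 : tau2;
  }

  int main() {
    const double u = 0.1, n = 0.4;
    std::cout << computeTau(n, u, n) << "\n";  // trust region at ||n||: tau ~= 1
    std::cout << computeTau(u, u, n) << "\n";  // trust region at ||u||: tau ~= 0
  }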
---
 gtsam/nonlinear/DoglegOptimizerImpl.cpp |  8 +++++---
 tests/testDoglegOptimizer.cpp           | 18 ++++++++++++++++++
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.cpp b/gtsam/nonlinear/DoglegOptimizerImpl.cpp
index 7e9db6b64..4f1c6fb54 100644
--- a/gtsam/nonlinear/DoglegOptimizerImpl.cpp
+++ b/gtsam/nonlinear/DoglegOptimizerImpl.cpp
@@ -67,12 +67,14 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double delta, const VectorValues&
   double tau1 = (-b + sqrt_b_m4ac) / (2.*a);
   double tau2 = (-b - sqrt_b_m4ac) / (2.*a);
 
+  // Determine correct solution accounting for machine precision
   double tau;
-  if(0.0 <= tau1 && tau1 <= 1.0) {
-    assert(!(0.0 <= tau2 && tau2 <= 1.0));
+  const double eps = std::numeric_limits<double>::epsilon();
+  if(-eps <= tau1 && tau1 <= 1.0 + eps) {
+    assert(!(-eps <= tau2 && tau2 <= 1.0 + eps));
     tau = tau1;
   } else {
-    assert(0.0 <= tau2 && tau2 <= 1.0);
+    assert(-eps <= tau2 && tau2 <= 1.0 + eps);
     tau = tau2;
   }
 
diff --git a/tests/testDoglegOptimizer.cpp b/tests/testDoglegOptimizer.cpp
index 6fbc522d3..870ad5fb8 100644
--- a/tests/testDoglegOptimizer.cpp
+++ b/tests/testDoglegOptimizer.cpp
@@ -72,6 +72,24 @@ TEST(DoglegOptimizer, ComputeBlend) {
   DOUBLES_EQUAL(Delta, xb.vector().norm(), 1e-10);
 }
 
+/* ************************************************************************* */
+TEST(DoglegOptimizer, ComputeBlendEdgeCases) {
+  // Test derived from Issue #1861
+  // Evaluate ComputeBlend behavior for edge cases where the trust region
+  // is equal in size to that of the Newton step or the gradient step.
+
+  // Simulated Newton (n) and Gradient Descent (u) step vectors w/ ||n|| > ||u||
+  VectorValues::Dims dims;
+  dims[0] = 3;
+  VectorValues n(Vector3(0.3233546123, -0.2133456123, 0.3664345632), dims);
+  VectorValues u(Vector3(0.0023456342, -0.04535687, 0.087345661212), dims);
+
+  // Test upper edge case where trust region is equal to magnitude of the Newton step
+  EXPECT(assert_equal(n, DoglegOptimizerImpl::ComputeBlend(n.norm(), u, n, false)));
+  // Test lower edge case where trust region is equal to magnitude of the gradient step
+  EXPECT(assert_equal(u, DoglegOptimizerImpl::ComputeBlend(u.norm(), u, n, false)));
+}
+
 /* ************************************************************************* */
 TEST(DoglegOptimizer, ComputeDoglegPoint) {
   // Create an arbitrary Bayes Net

From 8e85b68863ffdb451d7a729ff4d1bd89d2f4af32 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 8 Oct 2024 11:48:20 -0400
Subject: [PATCH 065/204] formatting fixes

---
 gtsam/hybrid/HybridGaussianConditional.cpp |  8 ++---
 gtsam/hybrid/HybridGaussianConditional.h   |  2 +-
 gtsam/hybrid/HybridGaussianFactor.cpp      | 29 ++++++++---------
 gtsam/hybrid/HybridGaussianFactor.h        | 17 +++++-----
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 38 ++++++++++++----------
 gtsam/hybrid/HybridGaussianFactorGraph.h   |  6 ++--
 6 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 4dcb84034..d6ba2dbb6 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -79,7 +79,7 @@ struct HybridGaussianConditional::Helper {
   explicit Helper(const Conditionals &conditionals)
       : conditionals(conditionals),
         minNegLogConstant(std::numeric_limits<double>::infinity()) {
-    auto func = [this](const GC::shared_ptr& gc) -> GaussianFactorValuePair {
+    auto func = [this](const GC::shared_ptr &gc) -> GaussianFactorValuePair {
       if (!gc) return {nullptr, std::numeric_limits<double>::infinity()};
       if (!nrFrontals) nrFrontals = gc->nrFrontals();
       double value = gc->negLogConstant();
@@ -97,10 +97,10 @@ struct HybridGaussianConditional::Helper {
 
 /* *******************************************************************************/
 HybridGaussianConditional::HybridGaussianConditional(
-    const DiscreteKeys& discreteParents, const Helper& helper)
+    const DiscreteKeys &discreteParents, const Helper &helper)
     : BaseFactor(discreteParents,
                  FactorValuePairs(helper.pairs,
-                                  [&](const GaussianFactorValuePair&
+                                  [&](const GaussianFactorValuePair &
                                           pair) {  // subtract minNegLogConstant
                                     return GaussianFactorValuePair{
                                         pair.first,
@@ -225,7 +225,7 @@ KeyVector HybridGaussianConditional::continuousParents() const {
       // remove that key from continuousParentKeys:
       continuousParentKeys.erase(std::remove(continuousParentKeys.begin(),
                                              continuousParentKeys.end(), key),
-                                continuousParentKeys.end());
+                                 continuousParentKeys.end());
     }
   return continuousParentKeys;
 }
diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index 25391cb83..75138b0dc 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -24,9 +24,9 @@
 #include
 #include
 #include
-#include
 #include
 #include
+#include
 
 #include
 #include
diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp
index afc818c4a..8d03f18d0 100644
--- a/gtsam/hybrid/HybridGaussianFactor.cpp
+++ b/gtsam/hybrid/HybridGaussianFactor.cpp
@@ -93,27 +93,27 @@ struct HybridGaussianFactor::ConstructorHelper {
 };
 
 /* *******************************************************************************/
-HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper &helper)
+HybridGaussianFactor::HybridGaussianFactor(const ConstructorHelper& helper)
     : Base(helper.continuousKeys, helper.discreteKeys),
       factors_(helper.pairs) {}
 
 HybridGaussianFactor::HybridGaussianFactor(
-    const DiscreteKey &discreteKey,
-    const std::vector<GaussianFactor::shared_ptr> &factors)
+    const DiscreteKey& discreteKey,
+    const std::vector<GaussianFactor::shared_ptr>& factors)
     : HybridGaussianFactor(ConstructorHelper(discreteKey, factors)) {}
 
 HybridGaussianFactor::HybridGaussianFactor(
-    const DiscreteKey &discreteKey,
-    const std::vector<GaussianFactorValuePair> &factorPairs)
+    const DiscreteKey& discreteKey,
+    const std::vector<GaussianFactorValuePair>& factorPairs)
    : HybridGaussianFactor(ConstructorHelper(discreteKey, factorPairs)) {}
 
-HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys &discreteKeys,
-                                           const FactorValuePairs &factors)
+HybridGaussianFactor::HybridGaussianFactor(const DiscreteKeys& discreteKeys,
+                                           const FactorValuePairs& factors)
    : HybridGaussianFactor(ConstructorHelper(discreteKeys, factors)) {}
 
 /* *******************************************************************************/
-bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const {
-  const This *e = dynamic_cast<const This *>(&lf);
+bool HybridGaussianFactor::equals(const HybridFactor& lf, double tol) const {
+  const This* e = dynamic_cast<const This*>(&lf);
   if (e == nullptr) return false;
 
   // This will return false if either factors_ is empty or e->factors_ is
@@ -130,8 +130,8 @@ bool HybridGaussianFactor::equals(const HybridFactor &lf, double tol) const {
 }
 
 /* *******************************************************************************/
-void HybridGaussianFactor::print(const std::string &s,
-                                 const KeyFormatter &formatter) const {
+void HybridGaussianFactor::print(const std::string& s,
+                                 const KeyFormatter& formatter) const {
   std::cout << (s.empty() ? "" : s + "\n");
"" : s + "\n"); HybridFactor::print("", formatter); std::cout << "{\n"; @@ -139,8 +139,7 @@ void HybridGaussianFactor::print(const std::string &s, std::cout << " empty" << std::endl; } else { factors_.print( - "", - [&](Key k) { return formatter(k); }, + "", [&](Key k) { return formatter(k); }, [&](const auto& pair) -> std::string { RedirectCout rd; std::cout << ":\n"; @@ -158,7 +157,7 @@ void HybridGaussianFactor::print(const std::string &s, /* *******************************************************************************/ GaussianFactorValuePair HybridGaussianFactor::operator()( - const DiscreteValues &assignment) const { + const DiscreteValues& assignment) const { return factors_(assignment); } @@ -176,7 +175,7 @@ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { /* *******************************************************************************/ AlgebraicDecisionTree HybridGaussianFactor::errorTree( - const VectorValues &continuousValues) const { + const VectorValues& continuousValues) const { // functor to convert from sharedFactor to double error value. auto errorFunc = [&continuousValues](const auto& pair) { return pair.first ? pair.first->error(continuousValues) + pair.second diff --git a/gtsam/hybrid/HybridGaussianFactor.h b/gtsam/hybrid/HybridGaussianFactor.h index 48a8bf48d..6c06c1c0a 100644 --- a/gtsam/hybrid/HybridGaussianFactor.h +++ b/gtsam/hybrid/HybridGaussianFactor.h @@ -58,7 +58,7 @@ using GaussianFactorValuePair = std::pair; * @ingroup hybrid */ class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor { -public: + public: using Base = HybridFactor; using This = HybridGaussianFactor; using shared_ptr = std::shared_ptr; @@ -68,11 +68,11 @@ public: /// typedef for Decision Tree of Gaussian factors and arbitrary value. using FactorValuePairs = DecisionTree; -private: + private: /// Decision tree of Gaussian factors indexed by discrete keys. FactorValuePairs factors_; -public: + public: /// @name Constructors /// @{ @@ -120,9 +120,8 @@ public: bool equals(const HybridFactor &lf, double tol = 1e-9) const override; - void - print(const std::string &s = "", - const KeyFormatter &formatter = DefaultKeyFormatter) const override; + void print(const std::string &s = "", const KeyFormatter &formatter = + DefaultKeyFormatter) const override; /// @} /// @name Standard API @@ -138,8 +137,8 @@ public: * @return AlgebraicDecisionTree A decision tree with the same keys * as the factors involved, and leaf values as the error. */ - AlgebraicDecisionTree - errorTree(const VectorValues &continuousValues) const override; + AlgebraicDecisionTree errorTree( + const VectorValues &continuousValues) const override; /** * @brief Compute the log-likelihood, including the log-normalizing constant. @@ -159,7 +158,7 @@ public: /// @} -private: + private: /** * @brief Helper function to augment the [A|b] matrices in the factor * components with the additional scalar values. 
This is done by storing the diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 26cdb8dd2..62fbb2703 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -39,7 +40,6 @@ #include #include #include -#include "gtsam/discrete/DiscreteValues.h" #include #include @@ -63,9 +63,9 @@ static const VectorValues kEmpty; /* ************************************************************************ */ // Throw a runtime exception for method specified in string s, and factor f: -static void throwRuntimeError(const std::string& s, - const std::shared_ptr& f) { - auto& fr = *f; +static void throwRuntimeError(const std::string &s, + const std::shared_ptr &f) { + auto &fr = *f; throw std::runtime_error(s + " not implemented for factor type " + demangle(typeid(fr).name()) + "."); } @@ -83,11 +83,12 @@ static void printFactor(const std::shared_ptr &factor, const DiscreteValues &assignment, const KeyFormatter &keyFormatter) { if (auto hgf = dynamic_pointer_cast(factor)) { - if (assignment.empty()) + if (assignment.empty()) { hgf->print("HybridGaussianFactor:", keyFormatter); - else + } else { hgf->operator()(assignment) .first->print("HybridGaussianFactor, component:", keyFormatter); + } } else if (auto gf = dynamic_pointer_cast(factor)) { factor->print("GaussianFactor:\n", keyFormatter); @@ -99,12 +100,13 @@ static void printFactor(const std::shared_ptr &factor, } else if (hc->isDiscrete()) { factor->print("DiscreteConditional:\n", keyFormatter); } else { - if (assignment.empty()) + if (assignment.empty()) { hc->print("HybridConditional:", keyFormatter); - else + } else { hc->asHybrid() ->choose(assignment) ->print("HybridConditional, component:\n", keyFormatter); + } } } else { factor->print("Unknown factor type\n", keyFormatter); @@ -112,13 +114,13 @@ static void printFactor(const std::shared_ptr &factor, } /* ************************************************************************ */ -void HybridGaussianFactorGraph::print(const std::string& s, - const KeyFormatter& keyFormatter) const { +void HybridGaussianFactorGraph::print(const std::string &s, + const KeyFormatter &keyFormatter) const { std::cout << (s.empty() ? "" : s + " ") << std::endl; std::cout << "size: " << size() << std::endl; for (size_t i = 0; i < factors_.size(); i++) { - auto&& factor = factors_[i]; + auto &&factor = factors_[i]; if (factor == nullptr) { std::cout << "Factor " << i << ": nullptr\n"; continue; @@ -163,7 +165,7 @@ HybridGaussianProductFactor HybridGaussianFactorGraph::collectProductFactor() const { HybridGaussianProductFactor result; - for (auto& f : factors_) { + for (auto &f : factors_) { // TODO(dellaert): can we make this cleaner and less error-prone? if (auto orphan = dynamic_pointer_cast(f)) { continue; // Ignore OrphanWrapper @@ -235,7 +237,7 @@ discreteElimination(const HybridGaussianFactorGraph &factors, } else if (auto gmf = dynamic_pointer_cast(f)) { // Case where we have a HybridGaussianFactor with no continuous keys. // In this case, compute discrete probabilities. - auto potential = [&](const auto& pair) -> double { + auto potential = [&](const auto &pair) -> double { auto [factor, scalar] = pair; // If factor is null, it has been pruned, hence return potential zero if (!factor) return 0.0; @@ -270,10 +272,10 @@ discreteElimination(const HybridGaussianFactorGraph &factors, * depends on the discrete separator if present. 
 */
static std::shared_ptr<Factor> createDiscreteFactor(
-    const ResultTree& eliminationResults,
+    const ResultTree &eliminationResults,
     const DiscreteKeys &discreteSeparator) {
   auto potential = [&](const auto &pair) -> double {
-    const auto& [conditional, factor] = pair.first;
+    const auto &[conditional, factor] = pair.first;
     const double scalar = pair.second;
     if (conditional && factor) {
       // `error` has the following contributions:
@@ -350,9 +352,9 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const {
 
   // This is the elimination method on the leaf nodes
   bool someContinuousLeft = false;
-  auto eliminate = [&](const std::pair<GaussianFactorGraph, double>& pair)
+  auto eliminate = [&](const std::pair<GaussianFactorGraph, double> &pair)
       -> std::pair<Result, double> {
-    const auto& [graph, scalar] = pair;
+    const auto &[graph, scalar] = pair;
 
     if (graph.empty()) {
       return {{nullptr, nullptr}, 0.0};
@@ -382,7 +384,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const {
 
   // Create the HybridGaussianConditional from the conditionals
   HybridGaussianConditional::Conditionals conditionals(
-      eliminationResults, [](const auto& pair) { return pair.first.first; });
+      eliminationResults, [](const auto &pair) { return pair.first.first; });
   auto hybridGaussian = std::make_shared<HybridGaussianConditional>(
       discreteSeparator, conditionals);
 
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h
index d72e81489..c503118eb 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.h
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.h
@@ -145,9 +145,9 @@ class GTSAM_EXPORT HybridGaussianFactorGraph
   /// @name Testable
   /// @{
 
-  void
-  print(const std::string &s = "HybridGaussianFactorGraph",
-        const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override;
+  void print(
+      const std::string& s = "HybridGaussianFactorGraph",
+      const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override;
 
   /**
    * @brief Print the errors of each factor in the hybrid factor graph.
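The next patch ("add type info") replaces generic `const auto &` lambda
parameters in the DecisionTree callbacks with the concrete pair and
shared-pointer types they bind to. A minimal standalone sketch of the likely
rationale (not GTSAM code; `Pair` and `transformLeaves` are hypothetical
stand-ins): with a named parameter type, a mismatch is diagnosed at the lambda
definition rather than deep inside the template instantiation, and the
signature documents what the tree leaves hold.

  #include <iostream>
  #include <memory>
  #include <utility>
  #include <vector>

  // Stand-in for GaussianFactorValuePair: a factor pointer plus a scalar.
  using Pair = std::pair<std::shared_ptr<int>, double>;

  // Stand-in for a DecisionTree-style transform over leaves.
  template <typename T, typename F>
  std::vector<T> transformLeaves(const std::vector<T>& leaves, F&& op) {
    std::vector<T> out;
    for (const auto& leaf : leaves) out.push_back(op(leaf));
    return out;
  }

  int main() {
    std::vector<Pair> leaves{{std::make_shared<int>(1), 0.5}};
    // Generic lambda: accepts anything; type errors surface only where
    // op(leaf) is instantiated, inside transformLeaves.
    auto generic = [](const auto& p) { return Pair{p.first, 2 * p.second}; };
    // Typed lambda: checked and self-documenting at the definition site.
    auto typed = [](const Pair& p) { return Pair{p.first, 2 * p.second}; };
    std::cout << transformLeaves(leaves, generic)[0].second << " "
              << transformLeaves(leaves, typed)[0].second << "\n";  // "1 1"
  }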
From f39f678c14a0855c96a1302d142a7ad1906556a4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 12:02:42 -0400 Subject: [PATCH 066/204] add type info --- gtsam/hybrid/HybridGaussianConditional.cpp | 10 ++++++---- gtsam/hybrid/HybridGaussianFactor.cpp | 16 +++++++++------- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 8 +++++--- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index d6ba2dbb6..2a0b5a875 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -183,10 +183,12 @@ bool HybridGaussianConditional::equals(const HybridFactor &lf, // Check the base and the factors: return BaseFactor::equals(*e, tol) && - conditionals_.equals( - e->conditionals_, [tol](const auto &f1, const auto &f2) { - return (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); - }); + conditionals_.equals(e->conditionals_, + [tol](const GaussianConditional::shared_ptr &f1, + const GaussianConditional::shared_ptr &f2) { + return (!f1 && !f2) || + (f1 && f2 && f1->equals(*f2, tol)); + }); } /* *******************************************************************************/ diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index 8d03f18d0..e24545293 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -51,7 +51,7 @@ struct HybridGaussianFactor::ConstructorHelper { // Build the FactorValuePairs DecisionTree pairs = FactorValuePairs( DecisionTree(discreteKeys, factors), - [](const auto& f) { + [](const sharedFactor& f) { return std::pair{f, f ? 0.0 : std::numeric_limits::infinity()}; }); @@ -63,7 +63,7 @@ struct HybridGaussianFactor::ConstructorHelper { const std::vector& factorPairs) : discreteKeys({discreteKey}) { // Extract continuous keys from the first non-null factor - for (const auto& pair : factorPairs) { + for (const GaussianFactorValuePair& pair : factorPairs) { if (pair.first && continuousKeys.empty()) { continuousKeys = pair.first->keys(); break; @@ -121,7 +121,8 @@ bool HybridGaussianFactor::equals(const HybridFactor& lf, double tol) const { if (factors_.empty() ^ e->factors_.empty()) return false; // Check the base and the factors: - auto compareFunc = [tol](const auto& pair1, const auto& pair2) { + auto compareFunc = [tol](const GaussianFactorValuePair& pair1, + const GaussianFactorValuePair& pair2) { auto f1 = pair1.first, f2 = pair2.first; bool match = (!f1 && !f2) || (f1 && f2 && f1->equals(*f2, tol)); return match && gtsam::equal(pair1.second, pair2.second, tol); @@ -140,7 +141,7 @@ void HybridGaussianFactor::print(const std::string& s, } else { factors_.print( "", [&](Key k) { return formatter(k); }, - [&](const auto& pair) -> std::string { + [&](const GaussianFactorValuePair& pair) -> std::string { RedirectCout rd; std::cout << ":\n"; if (pair.first) { @@ -168,7 +169,8 @@ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { // - Each leaf converted to a GaussianFactorGraph with just the factor and its // scalar. 
return {{factors_, - [](const auto& pair) -> std::pair { + [](const GaussianFactorValuePair& pair) + -> std::pair { return {GaussianFactorGraph{pair.first}, pair.second}; }}}; } @@ -177,7 +179,7 @@ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { AlgebraicDecisionTree HybridGaussianFactor::errorTree( const VectorValues& continuousValues) const { // functor to convert from sharedFactor to double error value. - auto errorFunc = [&continuousValues](const auto& pair) { + auto errorFunc = [&continuousValues](const GaussianFactorValuePair& pair) { return pair.first ? pair.first->error(continuousValues) + pair.second : std::numeric_limits::infinity(); }; @@ -188,7 +190,7 @@ AlgebraicDecisionTree HybridGaussianFactor::errorTree( /* *******************************************************************************/ double HybridGaussianFactor::error(const HybridValues& values) const { // Directly index to get the component, no need to build the whole tree. - const auto pair = factors_(values.discrete()); + const GaussianFactorValuePair pair = factors_(values.discrete()); return pair.first ? pair.first->error(values.continuous()) + pair.second : std::numeric_limits::infinity(); } diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 62fbb2703..167e53656 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -57,7 +57,8 @@ using std::dynamic_pointer_cast; using OrphanWrapper = BayesTreeOrphanWrapper; using Result = std::pair, GaussianFactor::shared_ptr>; -using ResultTree = DecisionTree>; +using ResultValuePair = std::pair; +using ResultTree = DecisionTree; static const VectorValues kEmpty; @@ -305,7 +306,7 @@ static std::shared_ptr createHybridGaussianFactor( const ResultTree &eliminationResults, const DiscreteKeys &discreteSeparator) { // Correct for the normalization constant used up by the conditional - auto correct = [&](const auto &pair) -> GaussianFactorValuePair { + auto correct = [&](const ResultValuePair &pair) -> GaussianFactorValuePair { const auto &[conditional, factor] = pair.first; const double scalar = pair.second; if (conditional && factor) { @@ -384,7 +385,8 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // Create the HybridGaussianConditional from the conditionals HybridGaussianConditional::Conditionals conditionals( - eliminationResults, [](const auto &pair) { return pair.first.first; }); + eliminationResults, + [](const ResultValuePair &pair) { return pair.first.first; }); auto hybridGaussian = std::make_shared( discreteSeparator, conditionals); From 7603cd4871b9fbd813bf0c626974b1f772bd3d02 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 12:03:01 -0400 Subject: [PATCH 067/204] missing include and formatting --- gtsam/hybrid/tests/testHybridBayesTree.cpp | 29 +++++++++------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridBayesTree.cpp b/gtsam/hybrid/tests/testHybridBayesTree.cpp index 319c6d52b..6da991173 100644 --- a/gtsam/hybrid/tests/testHybridBayesTree.cpp +++ b/gtsam/hybrid/tests/testHybridBayesTree.cpp @@ -22,6 +22,8 @@ #include #include +#include + #include "Switching.h" // Include for test suite @@ -62,7 +64,8 @@ std::vector components(Key key) { } // namespace two /* ************************************************************************* */ -TEST(HybridGaussianFactorGraph, eliminateFullMultifrontalSimple) { +TEST(HybridGaussianFactorGraph, + 
HybridGaussianFactorGraphEliminateFullMultifrontalSimple) { HybridGaussianFactorGraph hfg; hfg.add(JacobianFactor(X(0), I_3x3, Z_3x1)); @@ -179,10 +182,8 @@ TEST(HybridGaussianFactorGraph, Switching) { std::vector naturalX(N); std::iota(naturalX.begin(), naturalX.end(), 1); std::vector ordX; - std::transform( - naturalX.begin(), naturalX.end(), std::back_inserter(ordX), [](int x) { - return X(x); - }); + std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), + [](int x) { return X(x); }); auto [ndX, lvls] = makeBinaryOrdering(ordX); std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); @@ -195,10 +196,8 @@ TEST(HybridGaussianFactorGraph, Switching) { std::vector naturalC(N - 1); std::iota(naturalC.begin(), naturalC.end(), 1); std::vector ordC; - std::transform( - naturalC.begin(), naturalC.end(), std::back_inserter(ordC), [](int x) { - return M(x); - }); + std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), + [](int x) { return M(x); }); // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); const auto [ndC, lvls] = makeBinaryOrdering(ordC); @@ -237,10 +236,8 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { std::vector naturalX(N); std::iota(naturalX.begin(), naturalX.end(), 1); std::vector ordX; - std::transform( - naturalX.begin(), naturalX.end(), std::back_inserter(ordX), [](int x) { - return X(x); - }); + std::transform(naturalX.begin(), naturalX.end(), std::back_inserter(ordX), + [](int x) { return X(x); }); auto [ndX, lvls] = makeBinaryOrdering(ordX); std::copy(ndX.begin(), ndX.end(), std::back_inserter(ordering)); @@ -253,10 +250,8 @@ TEST(HybridGaussianFactorGraph, SwitchingISAM) { std::vector naturalC(N - 1); std::iota(naturalC.begin(), naturalC.end(), 1); std::vector ordC; - std::transform( - naturalC.begin(), naturalC.end(), std::back_inserter(ordC), [](int x) { - return M(x); - }); + std::transform(naturalC.begin(), naturalC.end(), std::back_inserter(ordC), + [](int x) { return M(x); }); // std::copy(ordC.begin(), ordC.end(), std::back_inserter(ordering)); const auto [ndC, lvls] = makeBinaryOrdering(ordC); From 874ba67693928a91b81597a8bf21872bcc118110 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 12:03:16 -0400 Subject: [PATCH 068/204] update comment --- gtsam/hybrid/tests/testHybridBayesNet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index d4192cb71..ad4e4e7a8 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -199,7 +199,7 @@ TEST(HybridBayesNet, Tiny) { factors_x0.push_back(fg.at(1)); auto productFactor = factors_x0.collectProductFactor(); - // Check that scalars are 0 and 1.79 + // Check that scalars are 0 and 1.79 (regression) EXPECT_DOUBLES_EQUAL(0.0, productFactor({{M(0), 0}}).second, 1e-9); EXPECT_DOUBLES_EQUAL(1.791759, productFactor({{M(0), 1}}).second, 1e-5); From 21b4c4c8d30ab75b4e8004c767b8cd411bbf35b2 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 12:03:35 -0400 Subject: [PATCH 069/204] improve HybridGaussianProductFactor --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 19 +++++++++++++++++-- gtsam/hybrid/HybridGaussianProductFactor.h | 7 +------ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index 2e95ea8d1..375349f9b 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ 
b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -26,39 +26,46 @@ namespace gtsam { using Y = HybridGaussianProductFactor::Y; +/* *******************************************************************************/ static Y add(const Y& y1, const Y& y2) { GaussianFactorGraph result = y1.first; result.push_back(y2.first); return {result, y1.second + y2.second}; }; +/* *******************************************************************************/ HybridGaussianProductFactor operator+(const HybridGaussianProductFactor& a, const HybridGaussianProductFactor& b) { return a.empty() ? b : HybridGaussianProductFactor(a.apply(b, add)); } +/* *******************************************************************************/ HybridGaussianProductFactor HybridGaussianProductFactor::operator+( const HybridGaussianFactor& factor) const { return *this + factor.asProductFactor(); } +/* *******************************************************************************/ HybridGaussianProductFactor HybridGaussianProductFactor::operator+( const GaussianFactor::shared_ptr& factor) const { return *this + HybridGaussianProductFactor(factor); } +/* *******************************************************************************/ HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( const GaussianFactor::shared_ptr& factor) { *this = *this + factor; return *this; } +/* *******************************************************************************/ HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( const HybridGaussianFactor& factor) { *this = *this + factor; return *this; } +/* *******************************************************************************/ void HybridGaussianProductFactor::print(const std::string& s, const KeyFormatter& formatter) const { KeySet keys; @@ -76,11 +83,19 @@ void HybridGaussianProductFactor::print(const std::string& s, } } +/* *******************************************************************************/ +bool HybridGaussianProductFactor::equals( + const HybridGaussianProductFactor& other, double tol) const { + return Base::equals(other, [tol](const Y& a, const Y& b) { + return a.first.equals(b.first, tol) && std::abs(a.second - b.second) < tol; + }); +} + +/* *******************************************************************************/ HybridGaussianProductFactor HybridGaussianProductFactor::removeEmpty() const { auto emptyGaussian = [](const Y& y) { bool hasNull = - std::any_of(y.first.begin(), - y.first.end(), + std::any_of(y.first.begin(), y.first.end(), [](const GaussianFactor::shared_ptr& ptr) { return !ptr; }); return hasNull ? 
Y{GaussianFactorGraph(), 0.0} : y; }; diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index 9c2aee74a..3658a0991 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -94,12 +94,7 @@ class HybridGaussianProductFactor * @return true if equal, false otherwise */ bool equals(const HybridGaussianProductFactor& other, - double tol = 1e-9) const { - return Base::equals(other, [tol](const Y& a, const Y& b) { - return a.first.equals(b.first, tol) && - std::abs(a.second - b.second) < tol; - }); - } + double tol = 1e-9) const; /// @} From 8b8466e04632dc62f49573608b21f2b8974c35f2 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 15:05:05 -0400 Subject: [PATCH 070/204] formatting testHybridGaussianFactorGraph --- .../tests/testHybridGaussianFactorGraph.cpp | 39 +++++++++---------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 9fdc1aaea..980ae0bab 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -17,6 +17,8 @@ * @author Frank Dellaert */ +#include +#include #include #include #include @@ -37,9 +39,6 @@ #include #include -#include -#include - #include #include #include @@ -73,8 +72,8 @@ TEST(HybridGaussianFactorGraph, Creation) { HybridGaussianConditional gm( m0, {std::make_shared(X(0), Z_3x1, I_3x3, X(1), I_3x3), - std::make_shared( - X(0), Vector3::Ones(), I_3x3, X(1), I_3x3)}); + std::make_shared(X(0), Vector3::Ones(), I_3x3, X(1), + I_3x3)}); hfg.add(gm); EXPECT_LONGS_EQUAL(2, hfg.size()); @@ -118,8 +117,8 @@ TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) { auto factor = std::dynamic_pointer_cast(result.second); CHECK(factor); // regression test - EXPECT( - assert_equal(DecisionTreeFactor{m1, "15.74961 15.74961"}, *factor, 1e-5)); + // Originally 15.74961, which is normalized to 1 + EXPECT(assert_equal(DecisionTreeFactor{m1, "1 1"}, *factor, 1e-5)); } /* ************************************************************************* */ @@ -177,7 +176,7 @@ TEST(HybridBayesNet, Switching) { Switching s(2, betweenSigma, priorSigma); // Check size of linearized factor graph - const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; + const HybridGaussianFactorGraph &graph = s.linearizedFactorGraph; EXPECT_LONGS_EQUAL(4, graph.size()); // Create some continuous and discrete values @@ -203,20 +202,20 @@ TEST(HybridBayesNet, Switching) { // Check error for M(0) = 0 const HybridValues values0{continuousValues, modeZero}; double expectedError0 = 0; - for (const auto& factor : graph) expectedError0 += factor->error(values0); + for (const auto &factor : graph) expectedError0 += factor->error(values0); EXPECT_DOUBLES_EQUAL(expectedError0, graph.error(values0), 1e-5); // Check error for M(0) = 1 const HybridValues values1{continuousValues, modeOne}; double expectedError1 = 0; - for (const auto& factor : graph) expectedError1 += factor->error(values1); + for (const auto &factor : graph) expectedError1 += factor->error(values1); EXPECT_DOUBLES_EQUAL(expectedError1, graph.error(values1), 1e-5); // Check errorTree AlgebraicDecisionTree actualErrors = graph.errorTree(continuousValues); // Create expected error tree - const AlgebraicDecisionTree expectedErrors( - M(0), expectedError0, expectedError1); + const AlgebraicDecisionTree expectedErrors(M(0), expectedError0, + 
expectedError1); // Check that the actual error tree matches the expected one EXPECT(assert_equal(expectedErrors, actualErrors, 1e-5)); @@ -232,8 +231,8 @@ TEST(HybridBayesNet, Switching) { const AlgebraicDecisionTree graphPosterior = graph.discretePosterior(continuousValues); const double sum = probPrime0 + probPrime1; - const AlgebraicDecisionTree expectedPosterior( - M(0), probPrime0 / sum, probPrime1 / sum); + const AlgebraicDecisionTree expectedPosterior(M(0), probPrime0 / sum, + probPrime1 / sum); EXPECT(assert_equal(expectedPosterior, graphPosterior, 1e-5)); // Make the clique of factors connected to x0: @@ -275,15 +274,13 @@ TEST(HybridBayesNet, Switching) { // Check that the scalars incorporate the negative log constant of the // conditional EXPECT_DOUBLES_EQUAL(scalar0 - (*p_x0_given_x1_m)(modeZero)->negLogConstant(), - (*phi_x1_m)(modeZero).second, - 1e-9); + (*phi_x1_m)(modeZero).second, 1e-9); EXPECT_DOUBLES_EQUAL(scalar1 - (*p_x0_given_x1_m)(modeOne)->negLogConstant(), - (*phi_x1_m)(modeOne).second, - 1e-9); + (*phi_x1_m)(modeOne).second, 1e-9); // Check that the conditional and remaining factor are consistent for both // modes - for (auto&& mode : {modeZero, modeOne}) { + for (auto &&mode : {modeZero, modeOne}) { const auto gc = (*p_x0_given_x1_m)(mode); const auto [gf, scalar] = (*phi_x1_m)(mode); @@ -342,7 +339,7 @@ TEST(HybridBayesNet, Switching) { // However, we can still check the total error for the clique factors_x1 and // the elimination results are equal, modulo -again- the negative log constant // of the conditional. - for (auto&& mode : {modeZero, modeOne}) { + for (auto &&mode : {modeZero, modeOne}) { auto gc_x1 = (*p_x1_given_m)(mode); double originalError_x1 = factors_x1.error({continuousValues, mode}); const double actualError = gc_x1->negLogConstant() + @@ -372,7 +369,7 @@ TEST(HybridGaussianFactorGraph, ErrorAndProbPrime) { Switching s(3); // Check size of linearized factor graph - const HybridGaussianFactorGraph& graph = s.linearizedFactorGraph; + const HybridGaussianFactorGraph &graph = s.linearizedFactorGraph; EXPECT_LONGS_EQUAL(7, graph.size()); // Eliminate the graph From 02d5421de2721e8ea8daf6aca45c8f955706aaca Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 15:12:04 -0400 Subject: [PATCH 071/204] add GTSAM_EXPORT to HybridGaussianProductFactor --- gtsam/hybrid/HybridGaussianProductFactor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index 3658a0991..b4c51dbd3 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -27,7 +27,7 @@ namespace gtsam { class HybridGaussianFactor; /// Alias for DecisionTree of GaussianFactorGraphs and their scalar sums -class HybridGaussianProductFactor +class GTSAM_EXPORT HybridGaussianProductFactor : public DecisionTree> { public: using Y = std::pair; From 711a07c01b71599f19213aeb9e4c0d79546b1848 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 16:49:07 -0400 Subject: [PATCH 072/204] small fix --- gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 980ae0bab..1aa4c8d49 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -117,8 +117,7 @@ TEST(HybridGaussianFactorGraph, 
hybridEliminationOneFactor) { auto factor = std::dynamic_pointer_cast(result.second); CHECK(factor); // regression test - // Originally 15.74961, which is normalized to 1 - EXPECT(assert_equal(DecisionTreeFactor{m1, "1 1"}, *factor, 1e-5)); + EXPECT(assert_equal(DecisionTreeFactor{m1, "15.74961 15.74961"}, *factor, 1e-5)); } /* ************************************************************************* */ From d58bd6cbafa20bb862fa264432379c1742cf914f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 18:28:58 -0400 Subject: [PATCH 073/204] get HybridBayesNet compiling on Windows --- gtsam/hybrid/HybridBayesNet.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index bba301be2..f46ce5249 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -25,6 +25,8 @@ #include #include +#include + namespace gtsam { /** From 0dbf13f9621d6f83112ddf68f3fd8e40f191038e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 18:41:52 -0400 Subject: [PATCH 074/204] Another Windows fix --- gtsam/hybrid/HybridConditional.h | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h index 588c44e0b..3921df170 100644 --- a/gtsam/hybrid/HybridConditional.h +++ b/gtsam/hybrid/HybridConditional.h @@ -25,6 +25,7 @@ #include #include +#include #include #include #include From c60d3420a3561490db2f44b497dc840c3db385c5 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 18:57:32 -0400 Subject: [PATCH 075/204] include in HybridConditional.cpp --- gtsam/hybrid/HybridConditional.cpp | 3 +++ gtsam/hybrid/HybridConditional.h | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 175aec30c..463f3e283 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -13,6 +13,7 @@ * @file HybridConditional.cpp * @date Mar 11, 2022 * @author Fan Jiang + * @author Varun Agrawal */ #include @@ -21,6 +22,8 @@ #include #include +#include + namespace gtsam { /* ************************************************************************ */ diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h index 3921df170..f3df725ef 100644 --- a/gtsam/hybrid/HybridConditional.h +++ b/gtsam/hybrid/HybridConditional.h @@ -13,6 +13,7 @@ * @file HybridConditional.h * @date Mar 11, 2022 * @author Fan Jiang + * @author Varun Agrawal */ #pragma once @@ -25,7 +26,6 @@ #include #include -#include #include #include #include From c08d6bdbed6bcf032b08215a3ffe15492cfe0bc9 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 19:07:15 -0400 Subject: [PATCH 076/204] maybe it's --- gtsam/hybrid/HybridConditional.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 463f3e283..f57237e93 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -22,7 +22,7 @@ #include #include -#include +#include namespace gtsam { From 6da1b019137e3634da33154abcf287ccb3e54961 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 19:25:38 -0400 Subject: [PATCH 077/204] include in HybridBayesTree.cpp --- gtsam/hybrid/HybridBayesTree.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/hybrid/HybridBayesTree.cpp b/gtsam/hybrid/HybridBayesTree.cpp index 0766f452b..df4a49f60 100644 --- 
a/gtsam/hybrid/HybridBayesTree.cpp +++ b/gtsam/hybrid/HybridBayesTree.cpp @@ -27,6 +27,7 @@ #include #include +#include #include "gtsam/hybrid/HybridConditional.h" From 03c467fe112befd8465d4f696d65fb9e35c2f2c0 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 19:43:20 -0400 Subject: [PATCH 078/204] add more includes to try and debug this --- gtsam/hybrid/HybridConditional.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index f57237e93..2228b8cdc 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -22,6 +22,8 @@ #include #include +#include +#include #include namespace gtsam { From 8e29d57251aa0414030d0c2a19e18d03beaf3901 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 22:33:56 -0400 Subject: [PATCH 079/204] remove includes --- gtsam/hybrid/HybridBayesNet.h | 2 -- gtsam/hybrid/HybridBayesTree.cpp | 4 +--- gtsam/hybrid/HybridConditional.cpp | 3 --- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/gtsam/hybrid/HybridBayesNet.h b/gtsam/hybrid/HybridBayesNet.h index f46ce5249..bba301be2 100644 --- a/gtsam/hybrid/HybridBayesNet.h +++ b/gtsam/hybrid/HybridBayesNet.h @@ -25,8 +25,6 @@ #include #include -#include - namespace gtsam { /** diff --git a/gtsam/hybrid/HybridBayesTree.cpp b/gtsam/hybrid/HybridBayesTree.cpp index df4a49f60..193635a21 100644 --- a/gtsam/hybrid/HybridBayesTree.cpp +++ b/gtsam/hybrid/HybridBayesTree.cpp @@ -22,14 +22,12 @@ #include #include #include +#include #include #include #include #include -#include - -#include "gtsam/hybrid/HybridConditional.h" namespace gtsam { diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 2228b8cdc..94d180727 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -22,9 +22,6 @@ #include #include -#include -#include -#include namespace gtsam { From 498079777d200d34bde12d1471593174ddc1a107 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 23:19:20 -0400 Subject: [PATCH 080/204] improved type aliasing --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 2 +- gtsam/hybrid/HybridGaussianProductFactor.h | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index 375349f9b..e71201ceb 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -24,7 +24,7 @@ namespace gtsam { -using Y = HybridGaussianProductFactor::Y; +using Y = GaussianFactorGraphValuePair; /* *******************************************************************************/ static Y add(const Y& y1, const Y& y2) { diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index b4c51dbd3..508113e87 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -26,12 +26,13 @@ namespace gtsam { class HybridGaussianFactor; +using GaussianFactorGraphValuePair = std::pair; + /// Alias for DecisionTree of GaussianFactorGraphs and their scalar sums class GTSAM_EXPORT HybridGaussianProductFactor - : public DecisionTree> { + : public DecisionTree { public: - using Y = std::pair; - using Base = DecisionTree; + using Base = DecisionTree; /// @name Constructors /// @{ @@ -46,7 +47,7 @@ class GTSAM_EXPORT HybridGaussianProductFactor */ template HybridGaussianProductFactor(const std::shared_ptr& 
factor) - : Base(Y{GaussianFactorGraph{factor}, 0.0}) {} + : Base(GaussianFactorGraphValuePair{GaussianFactorGraph{factor}, 0.0}) {} /** * @brief Construct from DecisionTree From cd3e0f37fba6c6fdd1133b4a0fc0c17074572d81 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 23:20:41 -0400 Subject: [PATCH 081/204] include sstream in HybridGaussianProductFactor --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index e71201ceb..fc61e1c54 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -22,6 +22,8 @@ #include #include +#include + namespace gtsam { using Y = GaussianFactorGraphValuePair; From 4ae5596200b48146a4158f7f85087eace0404d70 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 23:21:46 -0400 Subject: [PATCH 082/204] add back PotentiallyPrunedComponentError as an inline function --- gtsam/hybrid/HybridGaussianFactor.cpp | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index e24545293..b31fdca20 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -175,13 +175,19 @@ HybridGaussianProductFactor HybridGaussianFactor::asProductFactor() const { }}}; } +/* *******************************************************************************/ +inline static double PotentiallyPrunedComponentError( + const GaussianFactorValuePair& pair, const VectorValues& continuousValues) { + return pair.first ? pair.first->error(continuousValues) + pair.second + : std::numeric_limits::infinity(); +} + /* *******************************************************************************/ AlgebraicDecisionTree HybridGaussianFactor::errorTree( const VectorValues& continuousValues) const { // functor to convert from sharedFactor to double error value. auto errorFunc = [&continuousValues](const GaussianFactorValuePair& pair) { - return pair.first ? pair.first->error(continuousValues) + pair.second - : std::numeric_limits::infinity(); + return PotentiallyPrunedComponentError(pair, continuousValues); }; DecisionTree error_tree(factors_, errorFunc); return error_tree; @@ -191,8 +197,7 @@ AlgebraicDecisionTree HybridGaussianFactor::errorTree( double HybridGaussianFactor::error(const HybridValues& values) const { // Directly index to get the component, no need to build the whole tree. const GaussianFactorValuePair pair = factors_(values.discrete()); - return pair.first ? 
pair.first->error(values.continuous()) + pair.second - : std::numeric_limits::infinity(); + return PotentiallyPrunedComponentError(pair, values.continuous()); } } // namespace gtsam From 26c9dcba74b3f33706d82bd34ccb4c1d90109b4e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 23:22:47 -0400 Subject: [PATCH 083/204] formatting --- gtsam/hybrid/HybridConditional.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 94d180727..97ec1a1f8 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -22,7 +22,6 @@ #include #include - namespace gtsam { /* ************************************************************************ */ From 4df266ad64575c77346d4d8cd7a2dff593d027c7 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 23:31:15 -0400 Subject: [PATCH 084/204] replace sstream with string --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index fc61e1c54..30e1c57af 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -22,7 +22,7 @@ #include #include -#include +#include namespace gtsam { From 436524a4df30187c505c2acbd7e7affc0479590b Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 23:55:19 -0400 Subject: [PATCH 085/204] use cout instead of stringstream --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index 30e1c57af..f7b5994f0 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -78,10 +78,9 @@ void HybridGaussianProductFactor::print(const std::string& s, }; Base::print(s, formatter, printer); if (!keys.empty()) { - std::stringstream ss; - ss << s << " Keys:"; - for (auto&& key : keys) ss << " " << formatter(key); - std::cout << ss.str() << "." << std::endl; + std::cout << s << " Keys:"; + for (auto&& key : keys) std::cout << " " << formatter(key); + std::cout << "." 
<< std::endl; } } From 19fdb437ea5b457fc52c7083163ebaae6d4aa0e4 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 9 Oct 2024 20:03:05 +0900 Subject: [PATCH 086/204] Pure Google style in clang-format --- .clang-format | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.clang-format b/.clang-format index d54a39d88..f6cb8ad93 100644 --- a/.clang-format +++ b/.clang-format @@ -1,8 +1 @@ BasedOnStyle: Google - -BinPackArguments: false -BinPackParameters: false -ColumnLimit: 100 -DerivePointerAlignment: false -IncludeBlocks: Preserve -PointerAlignment: Left From 34bb1d0f343adfa79a6a3d9a991f60e1777eca07 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 9 Oct 2024 20:03:30 +0900 Subject: [PATCH 087/204] Shift error values before exponentiating --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 42 ++++++++++++------- .../tests/testHybridGaussianFactorGraph.cpp | 16 +------ 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 167e53656..5c83fe514 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -48,6 +48,8 @@ #include #include +#include "gtsam/discrete/DecisionTreeFactor.h" + namespace gtsam { /// Specialize EliminateableFactorGraph for HybridGaussianFactorGraph: @@ -226,6 +228,18 @@ continuousElimination(const HybridGaussianFactorGraph &factors, return {std::make_shared(result.first), result.second}; } +/* ************************************************************************ */ +/// Take negative log-values, shift them so that the minimum value is 0, and +/// then exponentiate to create a DecisionTreeFactor (not normalized yet!). +static DecisionTreeFactor::shared_ptr DiscreteFactorFromErrors( + const DiscreteKeys &discreteKeys, + const AlgebraicDecisionTree &errors) { + double min_log = errors.min(); + AlgebraicDecisionTree potentials = DecisionTree( + errors, [&min_log](const double x) { return exp(-(x - min_log)); }); + return std::make_shared(discreteKeys, potentials); +} + /* ************************************************************************ */ static std::pair> discreteElimination(const HybridGaussianFactorGraph &factors, @@ -237,15 +251,15 @@ discreteElimination(const HybridGaussianFactorGraph &factors, dfg.push_back(df); } else if (auto gmf = dynamic_pointer_cast(f)) { // Case where we have a HybridGaussianFactor with no continuous keys. - // In this case, compute discrete probabilities. - auto potential = [&](const auto &pair) -> double { + // In this case, compute a discrete factor from the remaining error. + auto calculateError = [&](const auto &pair) -> double { auto [factor, scalar] = pair; - // If factor is null, it has been pruned, hence return potential zero - if (!factor) return 0.0; - return exp(-scalar - factor->error(kEmpty)); + // If factor is null, it has been pruned, hence return infinite error + if (!factor) return std::numeric_limits::infinity(); + return scalar + factor->error(kEmpty); }; - DecisionTree potentials(gmf->factors(), potential); - dfg.emplace_shared(gmf->discreteKeys(), potentials); + DecisionTree errors(gmf->factors(), calculateError); + dfg.push_back(DiscreteFactorFromErrors(gmf->discreteKeys(), errors)); } else if (auto orphan = dynamic_pointer_cast(f)) { // Ignore orphaned clique. 
@@ -275,7 +289,7 @@ discreteElimination(const HybridGaussianFactorGraph &factors, static std::shared_ptr createDiscreteFactor( const ResultTree &eliminationResults, const DiscreteKeys &discreteSeparator) { - auto potential = [&](const auto &pair) -> double { + auto calculateError = [&](const auto &pair) -> double { const auto &[conditional, factor] = pair.first; const double scalar = pair.second; if (conditional && factor) { @@ -284,19 +298,17 @@ static std::shared_ptr createDiscreteFactor( // - factor->error(kempty) is the error remaining after elimination // - negLogK is what is given to the conditional to normalize const double negLogK = conditional->negLogConstant(); - const double error = scalar + factor->error(kEmpty) - negLogK; - return exp(-error); + return scalar + factor->error(kEmpty) - negLogK; } else if (!conditional && !factor) { - // If the factor is null, it has been pruned, hence return potential of - // zero - return 0.0; + // If the factor has been pruned, return infinite error + return std::numeric_limits::infinity(); } else { throw std::runtime_error("createDiscreteFactor has mixed NULLs"); } }; - DecisionTree potentials(eliminationResults, potential); - return std::make_shared(discreteSeparator, potentials); + DecisionTree errors(eliminationResults, calculateError); + return DiscreteFactorFromErrors(discreteSeparator, errors); } /* *******************************************************************************/ diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 1aa4c8d49..01294b28c 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -117,7 +117,7 @@ TEST(HybridGaussianFactorGraph, hybridEliminationOneFactor) { auto factor = std::dynamic_pointer_cast(result.second); CHECK(factor); // regression test - EXPECT(assert_equal(DecisionTreeFactor{m1, "15.74961 15.74961"}, *factor, 1e-5)); + EXPECT(assert_equal(DecisionTreeFactor{m1, "1 1"}, *factor, 1e-5)); } /* ************************************************************************* */ @@ -333,19 +333,7 @@ TEST(HybridBayesNet, Switching) { CHECK(phi_x1); EXPECT_LONGS_EQUAL(1, phi_x1->keys().size()); // m0 // We can't really check the error of the decision tree factor phi_x1, because - // the continuos factor whose error(kEmpty) we need is not available.. - - // However, we can still check the total error for the clique factors_x1 and - // the elimination results are equal, modulo -again- the negative log constant - // of the conditional. - for (auto &&mode : {modeZero, modeOne}) { - auto gc_x1 = (*p_x1_given_m)(mode); - double originalError_x1 = factors_x1.error({continuousValues, mode}); - const double actualError = gc_x1->negLogConstant() + - gc_x1->error(continuousValues) + - phi_x1->error(mode); - EXPECT_DOUBLES_EQUAL(originalError_x1, actualError, 1e-9); - } + // the continuous factor whose error(kEmpty) we need is not available.. 
// Now test full elimination of the graph: auto hybridBayesNet = graph.eliminateSequential(); From caddc734c2053006dfd06463f2f70b20bc0b2181 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 10:51:38 -0400 Subject: [PATCH 088/204] fix printing tests --- .../hybrid/tests/testHybridGaussianFactor.cpp | 5 ++- .../tests/testHybridNonlinearFactorGraph.cpp | 32 +++++++++---------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index 71aac8224..05e05c79b 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -104,13 +104,12 @@ TEST(HybridGaussianFactor, Keys) { } /* ************************************************************************* */ -TEST_DISABLED(HybridGaussianFactor, Printing) { +TEST(HybridGaussianFactor, Printing) { using namespace test_constructor; HybridGaussianFactor hybridFactor(m1, {f10, f11}); std::string expected = - R"(HybridGaussianFactor -Hybrid [x1 x2; 1]{ + R"(Hybrid [x1 x2; 1]{ Choice(1) 0 Leaf : A[x1] = [ diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index b5486c6cd..bbf427ecb 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -529,7 +529,7 @@ TEST(HybridNonlinearFactorGraph, Full_Elimination) { /**************************************************************************** * Test printing */ -TEST_DISABLED(HybridNonlinearFactorGraph, Printing) { +TEST(HybridNonlinearFactorGraph, Printing) { Switching self(3); auto linearizedFactorGraph = self.linearizedFactorGraph; @@ -549,7 +549,7 @@ Factor 0 GaussianFactor: A[x0] = [ - 10 + 10 ] b = [ -10 ] No noise model @@ -560,25 +560,25 @@ Hybrid [x0 x1; m0]{ Choice(m0) 0 Leaf : A[x0] = [ - -1 + -1 ] A[x1] = [ - 1 + 1 ] b = [ -1 ] No noise model -scalar: 0 +scalar: 0.918939 1 Leaf : A[x0] = [ - -1 + -1 ] A[x1] = [ - 1 + 1 ] b = [ -0 ] No noise model -scalar: 0 +scalar: 0.918939 } @@ -588,25 +588,25 @@ Hybrid [x1 x2; m1]{ Choice(m1) 0 Leaf : A[x1] = [ - -1 + -1 ] A[x2] = [ - 1 + 1 ] b = [ -1 ] No noise model -scalar: 0 +scalar: 0.918939 1 Leaf : A[x1] = [ - -1 + -1 ] A[x2] = [ - 1 + 1 ] b = [ -0 ] No noise model -scalar: 0 +scalar: 0.918939 } @@ -614,7 +614,7 @@ Factor 3 GaussianFactor: A[x1] = [ - 10 + 10 ] b = [ -10 ] No noise model @@ -623,7 +623,7 @@ Factor 4 GaussianFactor: A[x2] = [ - 10 + 10 ] b = [ -10 ] No noise model From 205eb18f471b21b936f3240a3ad291b8295e38eb Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 10:55:39 -0400 Subject: [PATCH 089/204] add serialization to HybridGaussianProductFactor --- gtsam/hybrid/HybridGaussianProductFactor.h | 10 ++++++++++ gtsam/hybrid/tests/testSerializationHybrid.cpp | 3 +++ 2 files changed, 13 insertions(+) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index 508113e87..a06f24423 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -115,6 +115,16 @@ class GTSAM_EXPORT HybridGaussianProductFactor HybridGaussianProductFactor removeEmpty() const; ///@} + + private: +#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); + } +#endif }; 
// Testable traits diff --git a/gtsam/hybrid/tests/testSerializationHybrid.cpp b/gtsam/hybrid/tests/testSerializationHybrid.cpp index 8258d8615..fd92e5cfb 100644 --- a/gtsam/hybrid/tests/testSerializationHybrid.cpp +++ b/gtsam/hybrid/tests/testSerializationHybrid.cpp @@ -59,6 +59,9 @@ BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Leaf, BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Choice, "gtsam_HybridGaussianFactor_Factors_Choice"); +BOOST_CLASS_EXPORT_GUID(GaussianFactorGraphValuePair, + "gtsam_GaussianFactorGraphValuePair"); + BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, "gtsam_HybridGaussianConditional"); BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals, From 4f7473532878f852c96a5efff88cbb3021bf459f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 11:11:05 -0400 Subject: [PATCH 090/204] checking if print is the problem --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 25 ++++++++++---------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index f7b5994f0..92888fbc8 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -70,18 +70,19 @@ HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( /* *******************************************************************************/ void HybridGaussianProductFactor::print(const std::string& s, const KeyFormatter& formatter) const { - KeySet keys; - auto printer = [&](const Y& y) { - if (keys.empty()) keys = y.first.keys(); - return "Graph of size " + std::to_string(y.first.size()) + - ", scalar sum: " + std::to_string(y.second); - }; - Base::print(s, formatter, printer); - if (!keys.empty()) { - std::cout << s << " Keys:"; - for (auto&& key : keys) std::cout << " " << formatter(key); - std::cout << "." << std::endl; - } + // KeySet keys; + // auto printer = [&](const Y& y) { + // if (keys.empty()) keys = y.first.keys(); + // return "Graph of size " + std::to_string(y.first.size()) + + // ", scalar sum: " + std::to_string(y.second); + // }; + // Base::print(s, formatter, printer); + // if (!keys.empty()) { + // std::cout << s << " Keys:"; + // for (auto&& key : keys) std::cout << " " << formatter(key); + // std::cout << "." 
<< std::endl; + // } + std::cout << "HybridGaussianProductFactor" << std::endl; } /* *******************************************************************************/ From 8a650b6a4e4f0be462932766f0cec9c7971b0e4f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 11:41:23 -0400 Subject: [PATCH 091/204] undo print and remove extra includes --- gtsam/hybrid/HybridGaussianConditional.cpp | 1 - gtsam/hybrid/HybridGaussianConditional.h | 1 - gtsam/hybrid/HybridGaussianProductFactor.cpp | 25 ++++++++++---------- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 2a0b5a875..58724163e 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 75138b0dc..4cc3d3196 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index 92888fbc8..f7b5994f0 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -70,19 +70,18 @@ HybridGaussianProductFactor& HybridGaussianProductFactor::operator+=( /* *******************************************************************************/ void HybridGaussianProductFactor::print(const std::string& s, const KeyFormatter& formatter) const { - // KeySet keys; - // auto printer = [&](const Y& y) { - // if (keys.empty()) keys = y.first.keys(); - // return "Graph of size " + std::to_string(y.first.size()) + - // ", scalar sum: " + std::to_string(y.second); - // }; - // Base::print(s, formatter, printer); - // if (!keys.empty()) { - // std::cout << s << " Keys:"; - // for (auto&& key : keys) std::cout << " " << formatter(key); - // std::cout << "." << std::endl; - // } - std::cout << "HybridGaussianProductFactor" << std::endl; + KeySet keys; + auto printer = [&](const Y& y) { + if (keys.empty()) keys = y.first.keys(); + return "Graph of size " + std::to_string(y.first.size()) + + ", scalar sum: " + std::to_string(y.second); + }; + Base::print(s, formatter, printer); + if (!keys.empty()) { + std::cout << s << " Keys:"; + for (auto&& key : keys) std::cout << " " << formatter(key); + std::cout << "." 
<< std::endl; + } } /* *******************************************************************************/ From e87d1fb1de0093dcdcdba0657cfa851bef9e412a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 11:41:31 -0400 Subject: [PATCH 092/204] comment out serialization --- .../hybrid/tests/testSerializationHybrid.cpp | 178 +++++++++--------- 1 file changed, 89 insertions(+), 89 deletions(-) diff --git a/gtsam/hybrid/tests/testSerializationHybrid.cpp b/gtsam/hybrid/tests/testSerializationHybrid.cpp index fd92e5cfb..bb1a492fe 100644 --- a/gtsam/hybrid/tests/testSerializationHybrid.cpp +++ b/gtsam/hybrid/tests/testSerializationHybrid.cpp @@ -39,112 +39,112 @@ using symbol_shorthand::Z; using namespace serializationTestHelpers; -BOOST_CLASS_EXPORT_GUID(Factor, "gtsam_Factor"); -BOOST_CLASS_EXPORT_GUID(HybridFactor, "gtsam_HybridFactor"); -BOOST_CLASS_EXPORT_GUID(JacobianFactor, "gtsam_JacobianFactor"); -BOOST_CLASS_EXPORT_GUID(GaussianConditional, "gtsam_GaussianConditional"); -BOOST_CLASS_EXPORT_GUID(DiscreteConditional, "gtsam_DiscreteConditional"); +// BOOST_CLASS_EXPORT_GUID(Factor, "gtsam_Factor"); +// BOOST_CLASS_EXPORT_GUID(HybridFactor, "gtsam_HybridFactor"); +// BOOST_CLASS_EXPORT_GUID(JacobianFactor, "gtsam_JacobianFactor"); +// BOOST_CLASS_EXPORT_GUID(GaussianConditional, "gtsam_GaussianConditional"); +// BOOST_CLASS_EXPORT_GUID(DiscreteConditional, "gtsam_DiscreteConditional"); -BOOST_CLASS_EXPORT_GUID(DecisionTreeFactor, "gtsam_DecisionTreeFactor"); -using ADT = AlgebraicDecisionTree; -BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); -BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); -BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") +// BOOST_CLASS_EXPORT_GUID(DecisionTreeFactor, "gtsam_DecisionTreeFactor"); +// using ADT = AlgebraicDecisionTree; +// BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); +// BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); +// BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor, "gtsam_HybridGaussianFactor"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs, - "gtsam_HybridGaussianFactor_Factors"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Leaf, - "gtsam_HybridGaussianFactor_Factors_Leaf"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Choice, - "gtsam_HybridGaussianFactor_Factors_Choice"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor, "gtsam_HybridGaussianFactor"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs, +// "gtsam_HybridGaussianFactor_Factors"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Leaf, +// "gtsam_HybridGaussianFactor_Factors_Leaf"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Choice, +// "gtsam_HybridGaussianFactor_Factors_Choice"); -BOOST_CLASS_EXPORT_GUID(GaussianFactorGraphValuePair, - "gtsam_GaussianFactorGraphValuePair"); +// BOOST_CLASS_EXPORT_GUID(GaussianFactorGraphValuePair, +// "gtsam_GaussianFactorGraphValuePair"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, - "gtsam_HybridGaussianConditional"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals, - "gtsam_HybridGaussianConditional_Conditionals"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Leaf, - "gtsam_HybridGaussianConditional_Conditionals_Leaf"); -BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Choice, - 
"gtsam_HybridGaussianConditional_Conditionals_Choice"); -// Needed since GaussianConditional::FromMeanAndStddev uses it -BOOST_CLASS_EXPORT_GUID(noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, +// "gtsam_HybridGaussianConditional"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals, +// "gtsam_HybridGaussianConditional_Conditionals"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Leaf, +// "gtsam_HybridGaussianConditional_Conditionals_Leaf"); +// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Choice, +// "gtsam_HybridGaussianConditional_Conditionals_Choice"); +// // Needed since GaussianConditional::FromMeanAndStddev uses it +// BOOST_CLASS_EXPORT_GUID(noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); -BOOST_CLASS_EXPORT_GUID(HybridBayesNet, "gtsam_HybridBayesNet"); +// BOOST_CLASS_EXPORT_GUID(HybridBayesNet, "gtsam_HybridBayesNet"); -/* ****************************************************************************/ -// Test HybridGaussianFactor serialization. -TEST(HybridSerialization, HybridGaussianFactor) { - DiscreteKey discreteKey{M(0), 2}; +// /* ****************************************************************************/ +// // Test HybridGaussianFactor serialization. +// TEST(HybridSerialization, HybridGaussianFactor) { +// DiscreteKey discreteKey{M(0), 2}; - auto A = Matrix::Zero(2, 1); - auto b0 = Matrix::Zero(2, 1); - auto b1 = Matrix::Ones(2, 1); - auto f0 = std::make_shared(X(0), A, b0); - auto f1 = std::make_shared(X(0), A, b1); - std::vector factors{f0, f1}; +// auto A = Matrix::Zero(2, 1); +// auto b0 = Matrix::Zero(2, 1); +// auto b1 = Matrix::Ones(2, 1); +// auto f0 = std::make_shared(X(0), A, b0); +// auto f1 = std::make_shared(X(0), A, b1); +// std::vector factors{f0, f1}; - const HybridGaussianFactor factor(discreteKey, factors); +// const HybridGaussianFactor factor(discreteKey, factors); - EXPECT(equalsObj(factor)); - EXPECT(equalsXML(factor)); - EXPECT(equalsBinary(factor)); -} +// EXPECT(equalsObj(factor)); +// EXPECT(equalsXML(factor)); +// EXPECT(equalsBinary(factor)); +// } -/* ****************************************************************************/ -// Test HybridConditional serialization. -TEST(HybridSerialization, HybridConditional) { - const DiscreteKey mode(M(0), 2); - Matrix1 I = Matrix1::Identity(); - const auto conditional = std::make_shared( - GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); - const HybridConditional hc(conditional); +// /* ****************************************************************************/ +// // Test HybridConditional serialization. +// TEST(HybridSerialization, HybridConditional) { +// const DiscreteKey mode(M(0), 2); +// Matrix1 I = Matrix1::Identity(); +// const auto conditional = std::make_shared( +// GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); +// const HybridConditional hc(conditional); - EXPECT(equalsObj(hc)); - EXPECT(equalsXML(hc)); - EXPECT(equalsBinary(hc)); -} +// EXPECT(equalsObj(hc)); +// EXPECT(equalsXML(hc)); +// EXPECT(equalsBinary(hc)); +// } -/* ****************************************************************************/ -// Test HybridGaussianConditional serialization. 
-TEST(HybridSerialization, HybridGaussianConditional) { - const DiscreteKey mode(M(0), 2); - Matrix1 I = Matrix1::Identity(); - const auto conditional0 = std::make_shared( - GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); - const auto conditional1 = std::make_shared( - GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 3)); - const HybridGaussianConditional gm(mode, {conditional0, conditional1}); +// /* ****************************************************************************/ +// // Test HybridGaussianConditional serialization. +// TEST(HybridSerialization, HybridGaussianConditional) { +// const DiscreteKey mode(M(0), 2); +// Matrix1 I = Matrix1::Identity(); +// const auto conditional0 = std::make_shared( +// GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); +// const auto conditional1 = std::make_shared( +// GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 3)); +// const HybridGaussianConditional gm(mode, {conditional0, conditional1}); - EXPECT(equalsObj(gm)); - EXPECT(equalsXML(gm)); - EXPECT(equalsBinary(gm)); -} +// EXPECT(equalsObj(gm)); +// EXPECT(equalsXML(gm)); +// EXPECT(equalsBinary(gm)); +// } -/* ****************************************************************************/ -// Test HybridBayesNet serialization. -TEST(HybridSerialization, HybridBayesNet) { - Switching s(2); - HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential()); +// /* ****************************************************************************/ +// // Test HybridBayesNet serialization. +// TEST(HybridSerialization, HybridBayesNet) { +// Switching s(2); +// HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential()); - EXPECT(equalsObj(hbn)); - EXPECT(equalsXML(hbn)); - EXPECT(equalsBinary(hbn)); -} +// EXPECT(equalsObj(hbn)); +// EXPECT(equalsXML(hbn)); +// EXPECT(equalsBinary(hbn)); +// } -/* ****************************************************************************/ -// Test HybridBayesTree serialization. -TEST(HybridSerialization, HybridBayesTree) { - Switching s(2); - HybridBayesTree hbt = *(s.linearizedFactorGraph.eliminateMultifrontal()); +// /* ****************************************************************************/ +// // Test HybridBayesTree serialization. 
+// TEST(HybridSerialization, HybridBayesTree) { +// Switching s(2); +// HybridBayesTree hbt = *(s.linearizedFactorGraph.eliminateMultifrontal()); - EXPECT(equalsObj(hbt)); - EXPECT(equalsXML(hbt)); - EXPECT(equalsBinary(hbt)); -} +// EXPECT(equalsObj(hbt)); +// EXPECT(equalsXML(hbt)); +// EXPECT(equalsBinary(hbt)); +// } /* ************************************************************************* */ int main() { From 99a39e6568826739aa7d9185694ffe15f7d74433 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 12:04:04 -0400 Subject: [PATCH 093/204] undo test change --- .../hybrid/tests/testSerializationHybrid.cpp | 180 +++++++++--------- 1 file changed, 91 insertions(+), 89 deletions(-) diff --git a/gtsam/hybrid/tests/testSerializationHybrid.cpp b/gtsam/hybrid/tests/testSerializationHybrid.cpp index bb1a492fe..5b06312ba 100644 --- a/gtsam/hybrid/tests/testSerializationHybrid.cpp +++ b/gtsam/hybrid/tests/testSerializationHybrid.cpp @@ -39,112 +39,114 @@ using symbol_shorthand::Z; using namespace serializationTestHelpers; -// BOOST_CLASS_EXPORT_GUID(Factor, "gtsam_Factor"); -// BOOST_CLASS_EXPORT_GUID(HybridFactor, "gtsam_HybridFactor"); -// BOOST_CLASS_EXPORT_GUID(JacobianFactor, "gtsam_JacobianFactor"); -// BOOST_CLASS_EXPORT_GUID(GaussianConditional, "gtsam_GaussianConditional"); -// BOOST_CLASS_EXPORT_GUID(DiscreteConditional, "gtsam_DiscreteConditional"); +BOOST_CLASS_EXPORT_GUID(Factor, "gtsam_Factor"); +BOOST_CLASS_EXPORT_GUID(HybridFactor, "gtsam_HybridFactor"); +BOOST_CLASS_EXPORT_GUID(JacobianFactor, "gtsam_JacobianFactor"); +BOOST_CLASS_EXPORT_GUID(GaussianConditional, "gtsam_GaussianConditional"); +BOOST_CLASS_EXPORT_GUID(DiscreteConditional, "gtsam_DiscreteConditional"); -// BOOST_CLASS_EXPORT_GUID(DecisionTreeFactor, "gtsam_DecisionTreeFactor"); -// using ADT = AlgebraicDecisionTree; -// BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); -// BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); -// BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") +BOOST_CLASS_EXPORT_GUID(DecisionTreeFactor, "gtsam_DecisionTreeFactor"); +using ADT = AlgebraicDecisionTree; +BOOST_CLASS_EXPORT_GUID(ADT, "gtsam_AlgebraicDecisionTree"); +BOOST_CLASS_EXPORT_GUID(ADT::Leaf, "gtsam_AlgebraicDecisionTree_Leaf"); +BOOST_CLASS_EXPORT_GUID(ADT::Choice, "gtsam_AlgebraicDecisionTree_Choice") -// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor, "gtsam_HybridGaussianFactor"); -// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs, -// "gtsam_HybridGaussianFactor_Factors"); -// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Leaf, -// "gtsam_HybridGaussianFactor_Factors_Leaf"); -// BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Choice, -// "gtsam_HybridGaussianFactor_Factors_Choice"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor, "gtsam_HybridGaussianFactor"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs, + "gtsam_HybridGaussianFactor_Factors"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Leaf, + "gtsam_HybridGaussianFactor_Factors_Leaf"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianFactor::FactorValuePairs::Choice, + "gtsam_HybridGaussianFactor_Factors_Choice"); -// BOOST_CLASS_EXPORT_GUID(GaussianFactorGraphValuePair, -// "gtsam_GaussianFactorGraphValuePair"); +BOOST_CLASS_EXPORT_GUID(GaussianFactorGraphValuePair, + "gtsam_GaussianFactorGraphValuePair"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianProductFactor, + "gtsam_HybridGaussianProductFactor"); -// 
BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, -// "gtsam_HybridGaussianConditional"); -// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals, -// "gtsam_HybridGaussianConditional_Conditionals"); -// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Leaf, -// "gtsam_HybridGaussianConditional_Conditionals_Leaf"); -// BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Choice, -// "gtsam_HybridGaussianConditional_Conditionals_Choice"); -// // Needed since GaussianConditional::FromMeanAndStddev uses it -// BOOST_CLASS_EXPORT_GUID(noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional, + "gtsam_HybridGaussianConditional"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals, + "gtsam_HybridGaussianConditional_Conditionals"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Leaf, + "gtsam_HybridGaussianConditional_Conditionals_Leaf"); +BOOST_CLASS_EXPORT_GUID(HybridGaussianConditional::Conditionals::Choice, + "gtsam_HybridGaussianConditional_Conditionals_Choice"); +// Needed since GaussianConditional::FromMeanAndStddev uses it +BOOST_CLASS_EXPORT_GUID(noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); -// BOOST_CLASS_EXPORT_GUID(HybridBayesNet, "gtsam_HybridBayesNet"); +BOOST_CLASS_EXPORT_GUID(HybridBayesNet, "gtsam_HybridBayesNet"); -// /* ****************************************************************************/ -// // Test HybridGaussianFactor serialization. -// TEST(HybridSerialization, HybridGaussianFactor) { -// DiscreteKey discreteKey{M(0), 2}; +/* ****************************************************************************/ +// Test HybridGaussianFactor serialization. +TEST(HybridSerialization, HybridGaussianFactor) { + DiscreteKey discreteKey{M(0), 2}; -// auto A = Matrix::Zero(2, 1); -// auto b0 = Matrix::Zero(2, 1); -// auto b1 = Matrix::Ones(2, 1); -// auto f0 = std::make_shared(X(0), A, b0); -// auto f1 = std::make_shared(X(0), A, b1); -// std::vector factors{f0, f1}; + auto A = Matrix::Zero(2, 1); + auto b0 = Matrix::Zero(2, 1); + auto b1 = Matrix::Ones(2, 1); + auto f0 = std::make_shared(X(0), A, b0); + auto f1 = std::make_shared(X(0), A, b1); + std::vector factors{f0, f1}; -// const HybridGaussianFactor factor(discreteKey, factors); + const HybridGaussianFactor factor(discreteKey, factors); -// EXPECT(equalsObj(factor)); -// EXPECT(equalsXML(factor)); -// EXPECT(equalsBinary(factor)); -// } + EXPECT(equalsObj(factor)); + EXPECT(equalsXML(factor)); + EXPECT(equalsBinary(factor)); +} -// /* ****************************************************************************/ -// // Test HybridConditional serialization. -// TEST(HybridSerialization, HybridConditional) { -// const DiscreteKey mode(M(0), 2); -// Matrix1 I = Matrix1::Identity(); -// const auto conditional = std::make_shared( -// GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); -// const HybridConditional hc(conditional); +/* ****************************************************************************/ +// Test HybridConditional serialization. 
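+// (equalsObj, equalsXML, and equalsBinary each serialize, deserialize, and
+// compare against the original, covering all three archive formats)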
+TEST(HybridSerialization, HybridConditional) { + const DiscreteKey mode(M(0), 2); + Matrix1 I = Matrix1::Identity(); + const auto conditional = std::make_shared( + GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); + const HybridConditional hc(conditional); -// EXPECT(equalsObj(hc)); -// EXPECT(equalsXML(hc)); -// EXPECT(equalsBinary(hc)); -// } + EXPECT(equalsObj(hc)); + EXPECT(equalsXML(hc)); + EXPECT(equalsBinary(hc)); +} -// /* ****************************************************************************/ -// // Test HybridGaussianConditional serialization. -// TEST(HybridSerialization, HybridGaussianConditional) { -// const DiscreteKey mode(M(0), 2); -// Matrix1 I = Matrix1::Identity(); -// const auto conditional0 = std::make_shared( -// GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); -// const auto conditional1 = std::make_shared( -// GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 3)); -// const HybridGaussianConditional gm(mode, {conditional0, conditional1}); +/* ****************************************************************************/ +// Test HybridGaussianConditional serialization. +TEST(HybridSerialization, HybridGaussianConditional) { + const DiscreteKey mode(M(0), 2); + Matrix1 I = Matrix1::Identity(); + const auto conditional0 = std::make_shared( + GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 0.5)); + const auto conditional1 = std::make_shared( + GaussianConditional::FromMeanAndStddev(Z(0), I, X(0), Vector1(0), 3)); + const HybridGaussianConditional gm(mode, {conditional0, conditional1}); -// EXPECT(equalsObj(gm)); -// EXPECT(equalsXML(gm)); -// EXPECT(equalsBinary(gm)); -// } + EXPECT(equalsObj(gm)); + EXPECT(equalsXML(gm)); + EXPECT(equalsBinary(gm)); +} -// /* ****************************************************************************/ -// // Test HybridBayesNet serialization. -// TEST(HybridSerialization, HybridBayesNet) { -// Switching s(2); -// HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential()); +/* ****************************************************************************/ +// Test HybridBayesNet serialization. +TEST(HybridSerialization, HybridBayesNet) { + Switching s(2); + HybridBayesNet hbn = *(s.linearizedFactorGraph.eliminateSequential()); -// EXPECT(equalsObj(hbn)); -// EXPECT(equalsXML(hbn)); -// EXPECT(equalsBinary(hbn)); -// } + EXPECT(equalsObj(hbn)); + EXPECT(equalsXML(hbn)); + EXPECT(equalsBinary(hbn)); +} -// /* ****************************************************************************/ -// // Test HybridBayesTree serialization. -// TEST(HybridSerialization, HybridBayesTree) { -// Switching s(2); -// HybridBayesTree hbt = *(s.linearizedFactorGraph.eliminateMultifrontal()); +/* ****************************************************************************/ +// Test HybridBayesTree serialization. 
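+// (Switching(2) supplies the factor graph; multifrontal elimination produces
+// the Bayes tree that is round-tripped below)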
+TEST(HybridSerialization, HybridBayesTree) { + Switching s(2); + HybridBayesTree hbt = *(s.linearizedFactorGraph.eliminateMultifrontal()); -// EXPECT(equalsObj(hbt)); -// EXPECT(equalsXML(hbt)); -// EXPECT(equalsBinary(hbt)); -// } + EXPECT(equalsObj(hbt)); + EXPECT(equalsXML(hbt)); + EXPECT(equalsBinary(hbt)); +} /* ************************************************************************* */ int main() { From 752e10fc21657a8dec54edfb1937a52f2411454d Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 12:39:03 -0400 Subject: [PATCH 094/204] delete constructor from string to fix issue --- gtsam/hybrid/HybridGaussianProductFactor.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index a06f24423..c94a04a2a 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -55,6 +55,11 @@ class GTSAM_EXPORT HybridGaussianProductFactor */ HybridGaussianProductFactor(Base&& tree) : Base(std::move(tree)) {} + /// Deleted constructor since we don't have istream operator for + /// GaussianFactorGraphValuePair + HybridGaussianProductFactor(const std::vector& labelCs, + const std::string& table) = delete; + ///@} /// @name Operators From 1ac8a6e2e0ab8b490d57fd8edba2b460f0f635ec Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 12:57:03 -0400 Subject: [PATCH 095/204] fix vector type --- gtsam/hybrid/HybridGaussianProductFactor.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index c94a04a2a..17d9238dd 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -57,7 +57,7 @@ class GTSAM_EXPORT HybridGaussianProductFactor /// Deleted constructor since we don't have istream operator for /// GaussianFactorGraphValuePair - HybridGaussianProductFactor(const std::vector& labelCs, + HybridGaussianProductFactor(const std::vector& labelCs, const std::string& table) = delete; ///@} From f4bf280047b352bf49ae939f8195f6aca65eebc4 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 13:08:42 -0400 Subject: [PATCH 096/204] implement constructor --- gtsam/hybrid/HybridGaussianProductFactor.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index 17d9238dd..ab15fee30 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -58,7 +58,10 @@ class GTSAM_EXPORT HybridGaussianProductFactor /// Deleted constructor since we don't have istream operator for /// GaussianFactorGraphValuePair HybridGaussianProductFactor(const std::vector& labelCs, - const std::string& table) = delete; + const std::string& table) { + throw std::runtime_error( + "HybridGaussianProductFactor: No way to construct."); + } ///@} From 55dc3f5372857f349db00e7581dadaf764fb35fc Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 13:19:41 -0400 Subject: [PATCH 097/204] experiment with removing the constructor --- gtsam/discrete/DecisionTree-inl.h | 24 +++++++++++----------- gtsam/discrete/DecisionTree.h | 4 ++-- gtsam/discrete/tests/testDecisionTree.cpp | 14 ++++++------- gtsam/hybrid/HybridGaussianProductFactor.h | 8 -------- 4 files changed, 21 insertions(+), 29 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h 
b/gtsam/discrete/DecisionTree-inl.h index 27e98fcde..29c36d27a 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -523,19 +523,19 @@ namespace gtsam { root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end()); } - /****************************************************************************/ - template - DecisionTree::DecisionTree(const std::vector& labelCs, - const std::string& table) { - // Convert std::string to values of type Y - std::vector ys; - std::istringstream iss(table); - copy(std::istream_iterator(iss), std::istream_iterator(), - back_inserter(ys)); + // /****************************************************************************/ + // template + // DecisionTree::DecisionTree(const std::vector& labelCs, + // const std::string& table) { + // // Convert std::string to values of type Y + // std::vector ys; + // std::istringstream iss(table); + // copy(std::istream_iterator(iss), std::istream_iterator(), + // back_inserter(ys)); - // now call recursive Create - root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end()); - } + // // now call recursive Create + // root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end()); + // } /****************************************************************************/ template diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 6d6179a7e..a02fa9d38 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -205,8 +205,8 @@ namespace gtsam { /** Create from keys and a corresponding vector of values */ DecisionTree(const std::vector& labelCs, const std::vector& ys); - /** Create from keys and string table */ - DecisionTree(const std::vector& labelCs, const std::string& table); + // /** Create from keys and string table */ + // DecisionTree(const std::vector& labelCs, const std::string& table); /** Create DecisionTree from others */ template diff --git a/gtsam/discrete/tests/testDecisionTree.cpp b/gtsam/discrete/tests/testDecisionTree.cpp index c625e1ba6..2cc8d8441 100644 --- a/gtsam/discrete/tests/testDecisionTree.cpp +++ b/gtsam/discrete/tests/testDecisionTree.cpp @@ -333,17 +333,17 @@ TEST(DecisionTree, Compose) { // Create from string vector keys{DT::LabelC(A, 2), DT::LabelC(B, 2)}; - DT f2(keys, "0 2 1 3"); + DT f2(keys, {0, 2, 1, 3}); EXPECT(assert_equal(f2, f1, 1e-9)); // Put this AB tree together with another one - DT f3(keys, "4 6 5 7"); + DT f3(keys, {4, 6, 5, 7}); DT f4(C, f1, f3); DOT(f4); // a bigger tree keys.push_back(DT::LabelC(C, 2)); - DT f5(keys, "0 4 2 6 1 5 3 7"); + DT f5(keys, {0, 4, 2, 6, 1, 5, 3, 7}); EXPECT(assert_equal(f5, f4, 1e-9)); DOT(f5); } @@ -508,7 +508,7 @@ TEST(DecisionTree, threshold) { // Create three level tree const vector keys{DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2)}; - DT tree(keys, "0 1 2 3 4 5 6 7"); + DT tree(keys, {0, 1, 2, 3, 4, 5, 6, 7}); // Check number of leaves equal to zero auto count = [](const int& value, int count) { @@ -536,10 +536,10 @@ TEST(DecisionTree, ApplyWithAssignment) { // Create three level tree const vector keys{DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2)}; - DT tree(keys, "1 2 3 4 5 6 7 8"); + DT tree(keys, {1, 2, 3, 4, 5, 6, 7, 8}); DecisionTree probTree( - keys, "0.01 0.02 0.03 0.04 0.05 0.06 0.07 0.08"); + keys, {0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08}); double threshold = 0.045; // We test pruning one tree by indexing into another. 
@@ -553,7 +553,7 @@ TEST(DecisionTree, ApplyWithAssignment) { }; DT prunedTree = tree.apply(pruner); - DT expectedTree(keys, "0 0 0 0 5 6 7 8"); + DT expectedTree(keys, {0, 0, 0, 0, 5, 6, 7, 8}); EXPECT(assert_equal(expectedTree, prunedTree)); size_t count = 0; diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index ab15fee30..a06f24423 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -55,14 +55,6 @@ class GTSAM_EXPORT HybridGaussianProductFactor */ HybridGaussianProductFactor(Base&& tree) : Base(std::move(tree)) {} - /// Deleted constructor since we don't have istream operator for - /// GaussianFactorGraphValuePair - HybridGaussianProductFactor(const std::vector& labelCs, - const std::string& table) { - throw std::runtime_error( - "HybridGaussianProductFactor: No way to construct."); - } - ///@} /// @name Operators From 4d707e7cdd83b8c959c64c2e300ab28646ca1473 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 13:37:55 -0400 Subject: [PATCH 098/204] Revert "experiment with removing the constructor" This reverts commit 55dc3f5372857f349db00e7581dadaf764fb35fc. --- gtsam/discrete/DecisionTree-inl.h | 24 +++++++++++----------- gtsam/discrete/DecisionTree.h | 4 ++-- gtsam/discrete/tests/testDecisionTree.cpp | 14 ++++++------- gtsam/hybrid/HybridGaussianProductFactor.h | 8 ++++++++ 4 files changed, 29 insertions(+), 21 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 29c36d27a..27e98fcde 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -523,19 +523,19 @@ namespace gtsam { root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end()); } - // /****************************************************************************/ - // template - // DecisionTree::DecisionTree(const std::vector& labelCs, - // const std::string& table) { - // // Convert std::string to values of type Y - // std::vector ys; - // std::istringstream iss(table); - // copy(std::istream_iterator(iss), std::istream_iterator(), - // back_inserter(ys)); + /****************************************************************************/ + template + DecisionTree::DecisionTree(const std::vector& labelCs, + const std::string& table) { + // Convert std::string to values of type Y + std::vector ys; + std::istringstream iss(table); + copy(std::istream_iterator(iss), std::istream_iterator(), + back_inserter(ys)); - // // now call recursive Create - // root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end()); - // } + // now call recursive Create + root_ = create(labelCs.begin(), labelCs.end(), ys.begin(), ys.end()); + } /****************************************************************************/ template diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index a02fa9d38..6d6179a7e 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -205,8 +205,8 @@ namespace gtsam { /** Create from keys and a corresponding vector of values */ DecisionTree(const std::vector& labelCs, const std::vector& ys); - // /** Create from keys and string table */ - // DecisionTree(const std::vector& labelCs, const std::string& table); + /** Create from keys and string table */ + DecisionTree(const std::vector& labelCs, const std::string& table); /** Create DecisionTree from others */ template diff --git a/gtsam/discrete/tests/testDecisionTree.cpp 
b/gtsam/discrete/tests/testDecisionTree.cpp index 2cc8d8441..c625e1ba6 100644 --- a/gtsam/discrete/tests/testDecisionTree.cpp +++ b/gtsam/discrete/tests/testDecisionTree.cpp @@ -333,17 +333,17 @@ TEST(DecisionTree, Compose) { // Create from string vector keys{DT::LabelC(A, 2), DT::LabelC(B, 2)}; - DT f2(keys, {0, 2, 1, 3}); + DT f2(keys, "0 2 1 3"); EXPECT(assert_equal(f2, f1, 1e-9)); // Put this AB tree together with another one - DT f3(keys, {4, 6, 5, 7}); + DT f3(keys, "4 6 5 7"); DT f4(C, f1, f3); DOT(f4); // a bigger tree keys.push_back(DT::LabelC(C, 2)); - DT f5(keys, {0, 4, 2, 6, 1, 5, 3, 7}); + DT f5(keys, "0 4 2 6 1 5 3 7"); EXPECT(assert_equal(f5, f4, 1e-9)); DOT(f5); } @@ -508,7 +508,7 @@ TEST(DecisionTree, threshold) { // Create three level tree const vector keys{DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2)}; - DT tree(keys, {0, 1, 2, 3, 4, 5, 6, 7}); + DT tree(keys, "0 1 2 3 4 5 6 7"); // Check number of leaves equal to zero auto count = [](const int& value, int count) { @@ -536,10 +536,10 @@ TEST(DecisionTree, ApplyWithAssignment) { // Create three level tree const vector keys{DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2)}; - DT tree(keys, {1, 2, 3, 4, 5, 6, 7, 8}); + DT tree(keys, "1 2 3 4 5 6 7 8"); DecisionTree probTree( - keys, {0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08}); + keys, "0.01 0.02 0.03 0.04 0.05 0.06 0.07 0.08"); double threshold = 0.045; // We test pruning one tree by indexing into another. @@ -553,7 +553,7 @@ TEST(DecisionTree, ApplyWithAssignment) { }; DT prunedTree = tree.apply(pruner); - DT expectedTree(keys, {0, 0, 0, 0, 5, 6, 7, 8}); + DT expectedTree(keys, "0 0 0 0 5 6 7 8"); EXPECT(assert_equal(expectedTree, prunedTree)); size_t count = 0; diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index a06f24423..ab15fee30 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h @@ -55,6 +55,14 @@ class GTSAM_EXPORT HybridGaussianProductFactor */ HybridGaussianProductFactor(Base&& tree) : Base(std::move(tree)) {} + /// Deleted constructor since we don't have istream operator for + /// GaussianFactorGraphValuePair + HybridGaussianProductFactor(const std::vector& labelCs, + const std::string& table) { + throw std::runtime_error( + "HybridGaussianProductFactor: No way to construct."); + } + ///@} /// @name Operators From 93ec276ef3090c46a13307c6c777b51f84f308aa Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 13:47:59 -0400 Subject: [PATCH 099/204] implement dummy >> operator --- gtsam/hybrid/HybridGaussianProductFactor.cpp | 6 ++++++ gtsam/hybrid/HybridGaussianProductFactor.h | 20 ++++++++++++-------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianProductFactor.cpp b/gtsam/hybrid/HybridGaussianProductFactor.cpp index f7b5994f0..280059f54 100644 --- a/gtsam/hybrid/HybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/HybridGaussianProductFactor.cpp @@ -103,4 +103,10 @@ HybridGaussianProductFactor HybridGaussianProductFactor::removeEmpty() const { return {Base(*this, emptyGaussian)}; } +/* *******************************************************************************/ +std::istream& operator>>(std::istream& is, GaussianFactorGraphValuePair& pair) { + // Dummy, don't do anything + return is; +} + } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianProductFactor.h b/gtsam/hybrid/HybridGaussianProductFactor.h index ab15fee30..60a58a3a5 100644 --- 
a/gtsam/hybrid/HybridGaussianProductFactor.h +++ b/gtsam/hybrid/HybridGaussianProductFactor.h
@@ -22,6 +22,8 @@
 #include
 #include

+#include
+
 namespace gtsam {

 class HybridGaussianFactor;
@@ -55,14 +57,6 @@
  */
 HybridGaussianProductFactor(Base&& tree) : Base(std::move(tree)) {}

- /// Deleted constructor since we don't have istream operator for
- /// GaussianFactorGraphValuePair
- HybridGaussianProductFactor(const std::vector& labelCs,
- const std::string& table) {
- throw std::runtime_error(
- "HybridGaussianProductFactor: No way to construct.");
- }
-
 ///@}

 /// @name Operators
@@ -140,4 +134,14 @@
 template <>
 struct traits
 : public Testable {};

+/**
+ * Create a dummy overload of >> for GaussianFactorGraphValuePair
+ * so that HybridGaussianProductFactor compiles
+ * with the constructor
+ * `DecisionTree(const std::vector& labelCs, const std::string& table)`.
+ *
+ * Needed to compile on Windows.
+ */
+std::istream& operator>>(std::istream& is, GaussianFactorGraphValuePair& pair);
+
 } // namespace gtsam

From a9ffbf5299c36f8a8e92d2335ed8e6f73fc30b12 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 8 Oct 2024 15:09:21 -0400
Subject: [PATCH 100/204] new AlgebraicDecisionTree constructor

---
 gtsam/discrete/AlgebraicDecisionTree.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h
index 6001b1983..45b949d3c 100644
--- a/gtsam/discrete/AlgebraicDecisionTree.h
+++ b/gtsam/discrete/AlgebraicDecisionTree.h
@@ -182,6 +182,21 @@ namespace gtsam {
 this->root_ = DecisionTree::convertFrom(other.root_, L_of_M, op);
 }

+ /**
+ * @brief Create from an arbitrary DecisionTree by operating on it
+ * with a functional `f`.
+ *
+ * @tparam X The type of the leaf of the original DecisionTree
+ * @tparam Func Type signature of functional `f`.
+ * @param other The original DecisionTree from which the
+ * AlgebraicDecisionTree is constructed.
+ * @param f Functional used to operate on
+ * the leaves of the input DecisionTree.
+ */
+ template
+ AlgebraicDecisionTree(const DecisionTree& other, Func f)
+ : Base(other, f) {}
+
 /** sum */
 AlgebraicDecisionTree operator+(const AlgebraicDecisionTree& g) const {
 return this->apply(g, &Ring::add);

From 024e50f9f77bf6ba1b8f2bf14d7f9b6939a9eac6 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 8 Oct 2024 15:09:33 -0400
Subject: [PATCH 101/204] normalize potentials

---
 gtsam/hybrid/HybridGaussianFactorGraph.cpp | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index 5c83fe514..49f3be776 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -229,8 +229,13 @@ continuousElimination(const HybridGaussianFactorGraph &factors,
 }

 /* ************************************************************************ */
-/// Take negative log-values, shift them so that the minimum value is 0, and
-/// then exponentiate to create a DecisionTreeFactor (not normalized yet!).
+/**
+ * @brief Take negative log-values, shift them so that the minimum value is 0,
+ * and then exponentiate to create a DecisionTreeFactor (not normalized yet!).
+ *
+ * @param errors DecisionTree of (unnormalized) errors.
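+ * Shifting by the minimum maps the smallest error to a potential of 1.0 and
+ * keeps every exponent non-positive, so exp() cannot overflow however large
+ * the errors are.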
+ * @return AlgebraicDecisionTree + */ static DecisionTreeFactor::shared_ptr DiscreteFactorFromErrors( const DiscreteKeys &discreteKeys, const AlgebraicDecisionTree &errors) { @@ -258,7 +263,7 @@ discreteElimination(const HybridGaussianFactorGraph &factors, if (!factor) return std::numeric_limits::infinity(); return scalar + factor->error(kEmpty); }; - DecisionTree errors(gmf->factors(), calculateError); + AlgebraicDecisionTree errors(gmf->factors(), calculateError); dfg.push_back(DiscreteFactorFromErrors(gmf->discreteKeys(), errors)); } else if (auto orphan = dynamic_pointer_cast(f)) { @@ -307,7 +312,7 @@ static std::shared_ptr createDiscreteFactor( } }; - DecisionTree errors(eliminationResults, calculateError); + AlgebraicDecisionTree errors(eliminationResults, calculateError); return DiscreteFactorFromErrors(discreteSeparator, errors); } From ac52be9cf07141e530c7dc909df55018ab4b6ed2 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 8 Oct 2024 15:09:38 -0400 Subject: [PATCH 102/204] update test --- gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp index 01294b28c..4b91d091d 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactorGraph.cpp @@ -333,7 +333,7 @@ TEST(HybridBayesNet, Switching) { CHECK(phi_x1); EXPECT_LONGS_EQUAL(1, phi_x1->keys().size()); // m0 // We can't really check the error of the decision tree factor phi_x1, because - // the continuous factor whose error(kEmpty) we need is not available.. + // the continuous factor whose error(kEmpty) we need is not available. // Now test full elimination of the graph: auto hybridBayesNet = graph.eliminateSequential(); From b79c69b4084ff4cf292103a56e0c5b3acfe7428d Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 15:10:13 -0400 Subject: [PATCH 103/204] small improvements --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 49f3be776..8e6123f10 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -234,13 +234,13 @@ continuousElimination(const HybridGaussianFactorGraph &factors, * and then exponentiate to create a DecisionTreeFactor (not normalized yet!). * * @param errors DecisionTree of (unnormalized) errors. 
- * @return AlgebraicDecisionTree + * @return DecisionTreeFactor::shared_ptr */ static DecisionTreeFactor::shared_ptr DiscreteFactorFromErrors( const DiscreteKeys &discreteKeys, const AlgebraicDecisionTree &errors) { double min_log = errors.min(); - AlgebraicDecisionTree potentials = DecisionTree( + AlgebraicDecisionTree potentials( errors, [&min_log](const double x) { return exp(-(x - min_log)); }); return std::make_shared(discreteKeys, potentials); } From e9bf802d788815e9e6bccaea66059fdf18bc0502 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 15:24:03 -0400 Subject: [PATCH 104/204] more simplification --- gtsam/hybrid/HybridGaussianFactor.cpp | 3 +-- gtsam/hybrid/HybridNonlinearFactor.cpp | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactor.cpp b/gtsam/hybrid/HybridGaussianFactor.cpp index b31fdca20..fd9bd2fd4 100644 --- a/gtsam/hybrid/HybridGaussianFactor.cpp +++ b/gtsam/hybrid/HybridGaussianFactor.cpp @@ -189,8 +189,7 @@ AlgebraicDecisionTree HybridGaussianFactor::errorTree( auto errorFunc = [&continuousValues](const GaussianFactorValuePair& pair) { return PotentiallyPrunedComponentError(pair, continuousValues); }; - DecisionTree error_tree(factors_, errorFunc); - return error_tree; + return {factors_, errorFunc}; } /* *******************************************************************************/ diff --git a/gtsam/hybrid/HybridNonlinearFactor.cpp b/gtsam/hybrid/HybridNonlinearFactor.cpp index 9378d07fe..6ffb95511 100644 --- a/gtsam/hybrid/HybridNonlinearFactor.cpp +++ b/gtsam/hybrid/HybridNonlinearFactor.cpp @@ -100,8 +100,7 @@ AlgebraicDecisionTree HybridNonlinearFactor::errorTree( auto [factor, val] = f; return factor->error(continuousValues) + val; }; - DecisionTree result(factors_, errorFunc); - return result; + return {factors_, errorFunc}; } /* *******************************************************************************/ From acda56d67b55eda015636ae1d164906768f0d844 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 9 Oct 2024 18:34:56 -0400 Subject: [PATCH 105/204] normalize no longer takes explicit sum, so that it normalizes correctly --- gtsam/discrete/AlgebraicDecisionTree.h | 5 +---- gtsam/discrete/tests/testAlgebraicDecisionTree.cpp | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h index 45b949d3c..e582db0ff 100644 --- a/gtsam/discrete/AlgebraicDecisionTree.h +++ b/gtsam/discrete/AlgebraicDecisionTree.h @@ -234,12 +234,9 @@ namespace gtsam { * @brief Helper method to perform normalization such that all leaves in the * tree sum to 1 * - * @param sum * @return AlgebraicDecisionTree */ - AlgebraicDecisionTree normalize(double sum) const { - return this->apply([&sum](const double& x) { return x / sum; }); - } + AlgebraicDecisionTree normalize() const { return (*this) / this->sum(); } /// Find the minimum values amongst all leaves double min() const { diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp index bf728695c..a5e46d538 100644 --- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp +++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp @@ -562,7 +562,7 @@ TEST(ADT, Sum) { TEST(ADT, Normalize) { ADT a = exampleADT(); double sum = a.sum(); - auto actual = a.normalize(sum); + auto actual = a.normalize(); DiscreteKey A(0, 2), B(1, 3), C(2, 2); DiscreteKeys keys = DiscreteKeys{A, B, C}; From 878c626a96cbabdeff0fe2b5665548bd65576357 
Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 10 Oct 2024 09:32:55 -0400 Subject: [PATCH 106/204] fix test TODOs --- .../tests/testHybridGaussianProductFactor.cpp | 15 ++++++++++----- .../tests/testHybridNonlinearFactorGraph.cpp | 2 -- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp index f41c5f0aa..3a4a6c1f4 100644 --- a/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianProductFactor.cpp @@ -128,7 +128,10 @@ TEST(HybridGaussianProductFactor, AsProductFactor) { EXPECT(actual.first.at(0) == f10); EXPECT_DOUBLES_EQUAL(10, actual.second, 1e-9); - // TODO(Frank): when killed hiding, f11 should also be there + mode[m1.first] = 1; + actual = product(mode); + EXPECT(actual.first.at(0) == f11); + EXPECT_DOUBLES_EQUAL(11, actual.second, 1e-9); } /* ************************************************************************* */ @@ -145,7 +148,10 @@ TEST(HybridGaussianProductFactor, AddOne) { EXPECT(actual.first.at(0) == f10); EXPECT_DOUBLES_EQUAL(10, actual.second, 1e-9); - // TODO(Frank): when killed hiding, f11 should also be there + mode[m1.first] = 1; + actual = product(mode); + EXPECT(actual.first.at(0) == f11); + EXPECT_DOUBLES_EQUAL(11, actual.second, 1e-9); } /* ************************************************************************* */ @@ -166,9 +172,8 @@ TEST(HybridGaussianProductFactor, AddTwo) { EXPECT_DOUBLES_EQUAL(10 + 20, actual00.second, 1e-9); auto actual12 = product({{M(1), 1}, {M(2), 2}}); - // TODO(Frank): when killed hiding, these should also equal: - // EXPECT(actual12.first.at(0) == f11); - // EXPECT(actual12.first.at(1) == f22); + EXPECT(actual12.first.at(0) == f11); + EXPECT(actual12.first.at(1) == f22); EXPECT_DOUBLES_EQUAL(11 + 22, actual12.second, 1e-9); } diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index bbf427ecb..e77476e25 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -973,8 +973,6 @@ TEST(HybridNonlinearFactorGraph, DifferentMeans) { VectorValues cont0 = bn->optimize(dv0); double error0 = bn->error(HybridValues(cont0, dv0)); - // TODO(Varun) Perform importance sampling to estimate error? 
- // regression EXPECT_DOUBLES_EQUAL(0.69314718056, error0, 1e-9); From 32108ab8ecdf9e6a85617166824486b681ad5b87 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 10 Oct 2024 09:33:07 -0400 Subject: [PATCH 107/204] remove unnecessary TODOs --- gtsam/discrete/CMakeLists.txt | 1 - gtsam/hybrid/CMakeLists.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/gtsam/discrete/CMakeLists.txt b/gtsam/discrete/CMakeLists.txt index d78dff34f..1c6aa9747 100644 --- a/gtsam/discrete/CMakeLists.txt +++ b/gtsam/discrete/CMakeLists.txt @@ -1,7 +1,6 @@ # Install headers set(subdir discrete) file(GLOB discrete_headers "*.h") -# FIXME: exclude headers install(FILES ${discrete_headers} DESTINATION include/gtsam/discrete) # Add all tests diff --git a/gtsam/hybrid/CMakeLists.txt b/gtsam/hybrid/CMakeLists.txt index f1cfcd5c4..cdada00dd 100644 --- a/gtsam/hybrid/CMakeLists.txt +++ b/gtsam/hybrid/CMakeLists.txt @@ -1,7 +1,6 @@ # Install headers set(subdir hybrid) file(GLOB hybrid_headers "*.h") -# FIXME: exclude headers install(FILES ${hybrid_headers} DESTINATION include/gtsam/hybrid) # Add all tests From e6dfa7be998a1dd8f1acdeabe35f57774bd7bdc8 Mon Sep 17 00:00:00 2001 From: Martin Valgur Date: Fri, 11 Oct 2024 18:04:28 +0300 Subject: [PATCH 108/204] precompiled_header.h: remove chartTesting.h Not used anywhere and fails to compile with C++20 on MSVC: gtsam\base\chartTesting.h(42,3): error C3878: syntax error: unexpected token ')' following 'expression_statement' --- gtsam/precompiled_header.h | 1 - 1 file changed, 1 deletion(-) diff --git a/gtsam/precompiled_header.h b/gtsam/precompiled_header.h index 5ff2a55c5..fbc79b87c 100644 --- a/gtsam/precompiled_header.h +++ b/gtsam/precompiled_header.h @@ -23,7 +23,6 @@ // numericalDerivative.h : includes things in linear, nonlinear :-( // testLie.h: includes numericalDerivative #include -#include #include #include #include From f98b9223e8cbd8a39fee2d91d6459dae1542c3b7 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 15 Oct 2024 15:43:03 +0900 Subject: [PATCH 109/204] Make compose and convertFrom static --- gtsam/discrete/DecisionTree-inl.h | 28 ++++++++++++++++------------ gtsam/discrete/DecisionTree.h | 8 ++++---- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 27e98fcde..8be5efaa6 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -580,7 +580,7 @@ namespace gtsam { template template typename DecisionTree::NodePtr DecisionTree::compose( - Iterator begin, Iterator end, const L& label) const { + Iterator begin, Iterator end, const L& label) { // find highest label among branches std::optional highestLabel; size_t nrChoices = 0; @@ -703,12 +703,9 @@ namespace gtsam { template typename DecisionTree::NodePtr DecisionTree::convertFrom( const typename DecisionTree::NodePtr& f, - std::function L_of_M, - std::function Y_of_X) const { + std::function L_of_M, std::function Y_of_X) { using LY = DecisionTree; - // Ugliness below because apparently we can't have templated virtual - // functions. // If leaf, apply unary conversion "op" and create a unique leaf. 
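 // (a leaf carries only a value of type X, so the label map L_of_M plays no
 // part in this branch)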
using MXLeaf = typename DecisionTree::Leaf; if (auto leaf = std::dynamic_pointer_cast(f)) { @@ -718,19 +715,27 @@ namespace gtsam { // Check if Choice using MXChoice = typename DecisionTree::Choice; auto choice = std::dynamic_pointer_cast(f); - if (!choice) throw std::invalid_argument( - "DecisionTree::convertFrom: Invalid NodePtr"); + if (!choice) + throw std::invalid_argument("DecisionTree::convertFrom: Invalid NodePtr"); // get new label const M oldLabel = choice->label(); const L newLabel = L_of_M(oldLabel); - // put together via Shannon expansion otherwise not sorted. + // Shannon expansion in this context involves: + // 1. Creating separate subtrees (functions) for each possible value of the new label. + // 2. Combining these subtrees using the 'compose' method, which implements the expansion. + // This approach guarantees that the resulting tree maintains the correct variable ordering + // based on the new labels (L) after translation from the old labels (M). + // Simply creating a Choice node here would not work because it wouldn't account for the + // potentially new ordering of variables resulting from the label translation, + // which is crucial for maintaining consistency and efficiency in the converted tree. std::vector functions; for (auto&& branch : choice->branches()) { functions.emplace_back(convertFrom(branch, L_of_M, Y_of_X)); } - return Choice::Unique(LY::compose(functions.begin(), functions.end(), newLabel)); + return Choice::Unique( + LY::compose(functions.begin(), functions.end(), newLabel)); } /****************************************************************************/ @@ -740,9 +745,8 @@ namespace gtsam { * * NOTE: We differentiate between leaves and assignments. Concretely, a 3 * binary variable tree will have 2^3=8 assignments, but based on pruning, it - * can have less than 8 leaves. For example, if a tree has all assignment - * values as 1, then pruning will cause the tree to have only 1 leaf yet 8 - * assignments. + * can have <8 leaves. For example, if a tree has all assignment values as 1, + * then pruning will cause the tree to have only 1 leaf yet 8 assignments. 
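+ * Consequently, visit/fold invoke their functor once per leaf rather than
+ * once per assignment, which matters when accumulating over pruned trees.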
*/ template struct Visit { diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 6d6179a7e..0d9db1fce 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -176,9 +176,9 @@ namespace gtsam { * @return NodePtr */ template - NodePtr convertFrom(const typename DecisionTree::NodePtr& f, - std::function L_of_M, - std::function Y_of_X) const; + static NodePtr convertFrom(const typename DecisionTree::NodePtr& f, + std::function L_of_M, + std::function Y_of_X); public: /// @name Standard Constructors @@ -402,7 +402,7 @@ namespace gtsam { // internal use only template NodePtr - compose(Iterator begin, Iterator end, const L& label) const; + static compose(Iterator begin, Iterator end, const L& label); /// @} From 6a5dd60d33d1f9ec78fd2e6c4136d4124e33bd9d Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 15 Oct 2024 15:51:21 +0900 Subject: [PATCH 110/204] Faster version of convertFrom when no label translation needed --- gtsam/discrete/DecisionTree-inl.h | 39 +++++++++++++++++++++++++++---- gtsam/discrete/DecisionTree.h | 13 +++++++++++ 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 8be5efaa6..6f19574fc 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -557,9 +557,7 @@ namespace gtsam { template DecisionTree::DecisionTree(const DecisionTree& other, Func Y_of_X) { - // Define functor for identity mapping of node label. - auto L_of_L = [](const L& label) { return label; }; - root_ = convertFrom(other.root_, L_of_L, Y_of_X); + root_ = convertFrom(other.root_, Y_of_X); } /****************************************************************************/ @@ -698,6 +696,36 @@ namespace gtsam { } } + /****************************************************************************/ + template + template + typename DecisionTree::NodePtr DecisionTree::convertFrom( + const typename DecisionTree::NodePtr& f, + std::function Y_of_X) { + + // If leaf, apply unary conversion "op" and create a unique leaf. + using LXLeaf = typename DecisionTree::Leaf; + if (auto leaf = std::dynamic_pointer_cast(f)) { + return NodePtr(new Leaf(Y_of_X(leaf->constant()))); + } + + // Check if Choice + using LXChoice = typename DecisionTree::Choice; + auto choice = std::dynamic_pointer_cast(f); + if (!choice) throw std::invalid_argument( + "DecisionTree::convertFrom: Invalid NodePtr"); + + // Create a new Choice node with the same label + auto newChoice = std::make_shared(choice->label(), choice->nrChoices()); + + // Convert each branch recursively + for (auto&& branch : choice->branches()) { + newChoice->push_back(convertFrom(branch, Y_of_X)); + } + + return Choice::Unique(newChoice); + } + /****************************************************************************/ template template @@ -745,8 +773,9 @@ namespace gtsam { * * NOTE: We differentiate between leaves and assignments. Concretely, a 3 * binary variable tree will have 2^3=8 assignments, but based on pruning, it - * can have <8 leaves. For example, if a tree has all assignment values as 1, - * then pruning will cause the tree to have only 1 leaf yet 8 assignments. + * can have less than 8 leaves. For example, if a tree has all assignment + * values as 1, then pruning will cause the tree to have only 1 leaf yet 8 + * assignments. 
*/ template struct Visit { diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 0d9db1fce..c1d7ea05f 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -165,6 +165,19 @@ namespace gtsam { template NodePtr create(It begin, It end, ValueIt beginY, ValueIt endY) const; + /** + * @brief Convert from a DecisionTree to DecisionTree. + * + * @tparam M The previous label type. + * @tparam X The previous value type. + * @param f The node pointer to the root of the previous DecisionTree. + * @param Y_of_X Functor to convert from value type X to type Y. + * @return NodePtr + */ + template + static NodePtr convertFrom(const typename DecisionTree::NodePtr& f, + std::function Y_of_X); + /** * @brief Convert from a DecisionTree to DecisionTree. * From ba7674d5fb6f45da39eedd2f034ed7652ee6f308 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 15 Oct 2024 16:35:50 +0900 Subject: [PATCH 111/204] Move semantics --- gtsam/discrete/DecisionTree-inl.h | 53 +++++++++++++++++-------------- gtsam/discrete/DecisionTree.h | 1 - 2 files changed, 29 insertions(+), 25 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 6f19574fc..7cb470c53 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -22,18 +22,15 @@ #include #include - -#include +#include #include -#include +#include #include +#include #include #include #include #include -#include -#include -#include namespace gtsam { @@ -251,22 +248,28 @@ namespace gtsam { label_ = f.label(); size_t count = f.nrChoices(); branches_.reserve(count); - for (size_t i = 0; i < count; i++) - push_back(f.branches_[i]->apply_f_op_g(g, op)); + for (size_t i = 0; i < count; i++) { + NodePtr newBranch = f.branches_[i]->apply_f_op_g(g, op); + push_back(std::move(newBranch)); + } } else if (g.label() > f.label()) { // f lower than g label_ = g.label(); size_t count = g.nrChoices(); branches_.reserve(count); - for (size_t i = 0; i < count; i++) - push_back(g.branches_[i]->apply_g_op_fC(f, op)); + for (size_t i = 0; i < count; i++) { + NodePtr newBranch = g.branches_[i]->apply_g_op_fC(f, op); + push_back(std::move(newBranch)); + } } else { // f same level as g label_ = f.label(); size_t count = f.nrChoices(); branches_.reserve(count); - for (size_t i = 0; i < count; i++) - push_back(f.branches_[i]->apply_f_op_g(*g.branches_[i], op)); + for (size_t i = 0; i < count; i++) { + NodePtr newBranch = f.branches_[i]->apply_f_op_g(*g.branches_[i], op); + push_back(std::move(newBranch)); + } } } @@ -284,12 +287,12 @@ namespace gtsam { } /** add a branch: TODO merge into constructor */ - void push_back(const NodePtr& node) { + void push_back(NodePtr&& node) { // allSame_ is restricted to leaf nodes in a decision tree if (allSame_ && !branches_.empty()) { allSame_ = node->sameLeaf(*branches_.back()); } - branches_.push_back(node); + branches_.push_back(std::move(node)); } /// print (as a tree). 
@@ -497,9 +500,9 @@ namespace gtsam { DecisionTree::DecisionTree(const L& label, const Y& y1, const Y& y2) { auto a = std::make_shared(label, 2); NodePtr l1(new Leaf(y1)), l2(new Leaf(y2)); - a->push_back(l1); - a->push_back(l2); - root_ = Choice::Unique(a); + a->push_back(std::move(l1)); + a->push_back(std::move(l2)); + root_ = Choice::Unique(std::move(a)); } /****************************************************************************/ @@ -510,11 +513,10 @@ namespace gtsam { "DecisionTree: binary constructor called with non-binary label"); auto a = std::make_shared(labelC.first, 2); NodePtr l1(new Leaf(y1)), l2(new Leaf(y2)); - a->push_back(l1); - a->push_back(l2); - root_ = Choice::Unique(a); + a->push_back(std::move(l1)); + a->push_back(std::move(l2)); + root_ = Choice::Unique(std::move(a)); } - /****************************************************************************/ template DecisionTree::DecisionTree(const std::vector& labelCs, @@ -596,8 +598,10 @@ namespace gtsam { // if label is already in correct order, just put together a choice on label if (!nrChoices || !highestLabel || label > *highestLabel) { auto choiceOnLabel = std::make_shared(label, end - begin); - for (Iterator it = begin; it != end; it++) - choiceOnLabel->push_back(it->root_); + for (Iterator it = begin; it != end; it++) { + NodePtr root = it->root_; + choiceOnLabel->push_back(std::move(root)); + } // If no reordering, no need to call Choice::Unique return choiceOnLabel; } else { @@ -616,7 +620,7 @@ namespace gtsam { } // We then recurse, for all values of the highest label NodePtr fi = compose(functions.begin(), functions.end(), label); - choiceOnHighestLabel->push_back(fi); + choiceOnHighestLabel->push_back(std::move(fi)); } return choiceOnHighestLabel; } @@ -673,6 +677,7 @@ namespace gtsam { // Creates one tree (i.e.,function) for each choice of current key // by calling create recursively, and then puts them all together. 
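 // Each recursive call below consumes a contiguous block of size/nrChoices
 // consecutive Y values.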
std::vector functions; + functions.reserve(nrChoices); size_t split = size / nrChoices; for (size_t i = 0; i < nrChoices; i++, beginY += split) { NodePtr f = build(labelC, end, beginY, beginY + split); diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index c1d7ea05f..34c916d02 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include From 8cc5171cbc700382f22b1d5461ea99151407f4d8 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 15 Oct 2024 16:50:28 +0900 Subject: [PATCH 112/204] Two more methods are static --- gtsam/discrete/DecisionTree-inl.h | 4 ++-- gtsam/discrete/DecisionTree.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 7cb470c53..4266ace15 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -650,7 +650,7 @@ namespace gtsam { template template typename DecisionTree::NodePtr DecisionTree::build( - It begin, It end, ValueIt beginY, ValueIt endY) const { + It begin, It end, ValueIt beginY, ValueIt endY) { // get crucial counts size_t nrChoices = begin->second; size_t size = endY - beginY; @@ -692,7 +692,7 @@ namespace gtsam { template template typename DecisionTree::NodePtr DecisionTree::create( - It begin, It end, ValueIt beginY, ValueIt endY) const { + It begin, It end, ValueIt beginY, ValueIt endY) { auto node = build(begin, end, beginY, endY); if (auto choice = std::dynamic_pointer_cast(node)) { return Choice::Unique(choice); diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 34c916d02..6d8d86530 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -154,7 +154,7 @@ namespace gtsam { * and Y values */ template - NodePtr build(It begin, It end, ValueIt beginY, ValueIt endY) const; + static NodePtr build(It begin, It end, ValueIt beginY, ValueIt endY); /** Internal helper function to create from * keys, cardinalities, and Y values. @@ -162,7 +162,7 @@ namespace gtsam { * before we prune in a top-down fashion. */ template - NodePtr create(It begin, It end, ValueIt beginY, ValueIt endY) const; + static NodePtr create(It begin, It end, ValueIt beginY, ValueIt endY); /** * @brief Convert from a DecisionTree to DecisionTree. 
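A minimal usage sketch tying the last few patches together: the functional
AlgebraicDecisionTree constructor from PATCH 100 forwards to the value-only
convertFrom overload from PATCH 110, so no Shannon expansion (label reordering
via compose) happens when the labels are unchanged. This sketch is not part of
the patch series; the key, the error values, and the main() scaffolding are
illustrative only.

#include <gtsam/discrete/AlgebraicDecisionTree.h>
#include <gtsam/discrete/DecisionTree.h>
#include <gtsam/inference/Key.h>
#include <cmath>

using namespace gtsam;

int main() {
  // A two-leaf tree of negative log-errors over one binary variable (key 0).
  DecisionTree<Key, double> errors(Key(0), 1.0, 3.0);

  // Value-only conversion: same labels, each leaf mapped through the functor.
  // This reaches the fast convertFrom path added in PATCH 110 via the
  // constructor added in PATCH 100.
  AlgebraicDecisionTree<Key> potentials(
      errors, [](double e) { return std::exp(-e); });

  // Evaluate the leaf for assignment {0: 1}: exp(-3) ~= 0.0498.
  Assignment<Key> assignment;
  assignment[Key(0)] = 1;
  return potentials(assignment) > 0.0 ? 0 : 1;
}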
From 935df2b90cf35251eaea31f6880c51a04e8c6cf0 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 15 Oct 2024 22:36:21 +0900 Subject: [PATCH 113/204] Add missing header --- gtsam/discrete/AlgebraicDecisionTree.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gtsam/discrete/AlgebraicDecisionTree.h b/gtsam/discrete/AlgebraicDecisionTree.h index e582db0ff..9948b0be6 100644 --- a/gtsam/discrete/AlgebraicDecisionTree.h +++ b/gtsam/discrete/AlgebraicDecisionTree.h @@ -22,10 +22,12 @@ #include #include +#include #include #include #include #include + namespace gtsam { /** From 4a0257c0ce720a8202d7e96798347cfafb4cd1f0 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 14 Oct 2024 16:51:43 -0400 Subject: [PATCH 114/204] move test file to nonlinear directory --- .../nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tests/testGradientDescentOptimizer.cpp => gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp (100%) diff --git a/tests/testGradientDescentOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp similarity index 100% rename from tests/testGradientDescentOptimizer.cpp rename to gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp From 322a23d49c9e26d00cf546c7c95e52997983f523 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 14 Oct 2024 21:01:31 -0400 Subject: [PATCH 115/204] clean up and formatting --- ...estNonlinearConjugateGradientOptimizer.cpp | 61 +++++++++---------- 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index d7ca70459..36673c7a0 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -1,56 +1,52 @@ /** - * @file NonlinearConjugateGradientOptimizer.cpp - * @brief Test simple CG optimizer + * @file testNonlinearConjugateGradientOptimizer.cpp + * @brief Test nonlinear CG optimizer * @author Yong-Dian Jian + * @author Varun Agrawal * @date June 11, 2012 */ -/** - * @file testGradientDescentOptimizer.cpp - * @brief Small test of NonlinearConjugateGradientOptimizer - * @author Yong-Dian Jian - * @date Jun 11, 2012 - */ - -#include +#include +#include #include #include #include -#include - -#include - +#include using namespace std; using namespace gtsam; // Generate a small PoseSLAM problem std::tuple generateProblem() { - // 1. Create graph container and add factors to it NonlinearFactorGraph graph; // 2a. Add Gaussian prior - Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin - SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas( - Vector3(0.3, 0.3, 0.1)); + Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin + SharedDiagonal priorNoise = + noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1)); graph.addPrior(1, priorMean, priorNoise); // 2b. 
Add odometry factors - SharedDiagonal odometryNoise = noiseModel::Diagonal::Sigmas( - Vector3(0.2, 0.2, 0.1)); - graph.emplace_shared>(1, 2, Pose2(2.0, 0.0, 0.0), odometryNoise); - graph.emplace_shared>(2, 3, Pose2(2.0, 0.0, M_PI_2), odometryNoise); - graph.emplace_shared>(3, 4, Pose2(2.0, 0.0, M_PI_2), odometryNoise); - graph.emplace_shared>(4, 5, Pose2(2.0, 0.0, M_PI_2), odometryNoise); + SharedDiagonal odometryNoise = + noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); + graph.emplace_shared>(1, 2, Pose2(2.0, 0.0, 0.0), + odometryNoise); + graph.emplace_shared>(2, 3, Pose2(2.0, 0.0, M_PI_2), + odometryNoise); + graph.emplace_shared>(3, 4, Pose2(2.0, 0.0, M_PI_2), + odometryNoise); + graph.emplace_shared>(4, 5, Pose2(2.0, 0.0, M_PI_2), + odometryNoise); // 2c. Add pose constraint - SharedDiagonal constraintUncertainty = noiseModel::Diagonal::Sigmas( - Vector3(0.2, 0.2, 0.1)); + SharedDiagonal constraintUncertainty = + noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1)); graph.emplace_shared>(5, 2, Pose2(2.0, 0.0, M_PI_2), - constraintUncertainty); + constraintUncertainty); - // 3. Create the data structure to hold the initialEstimate estimate to the solution + // 3. Create the data structure to hold the initialEstimate estimate to the + // solution Values initialEstimate; Pose2 x1(0.5, 0.0, 0.2); initialEstimate.insert(1, x1); @@ -68,16 +64,17 @@ std::tuple generateProblem() { /* ************************************************************************* */ TEST(NonlinearConjugateGradientOptimizer, Optimize) { -const auto [graph, initialEstimate] = generateProblem(); -// cout << "initial error = " << graph.error(initialEstimate) << endl; + const auto [graph, initialEstimate] = generateProblem(); + // cout << "initial error = " << graph.error(initialEstimate) << endl; NonlinearOptimizerParams param; - param.maxIterations = 500; /* requires a larger number of iterations to converge */ + param.maxIterations = + 500; /* requires a larger number of iterations to converge */ param.verbosity = NonlinearOptimizerParams::SILENT; NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param); Values result = optimizer.optimize(); -// cout << "cg final error = " << graph.error(result) << endl; + // cout << "cg final error = " << graph.error(result) << endl; EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4); } From 91d637b0928b6ef9426b66d079852fd732c07943 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 14 Oct 2024 21:16:31 -0400 Subject: [PATCH 116/204] improve PCGSolverParameters --- gtsam/linear/PCGSolver.cpp | 2 +- gtsam/linear/PCGSolver.h | 26 +++++++++++++++----------- gtsam/sfm/ShonanAveraging.cpp | 11 +++-------- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index 5ec6fa67b..320c3e535 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -40,7 +40,7 @@ void PCGSolverParameters::print(ostream &os) const { /*****************************************************************************/ PCGSolver::PCGSolver(const PCGSolverParameters &p) { parameters_ = p; - preconditioner_ = createPreconditioner(p.preconditioner_); + preconditioner_ = createPreconditioner(p.preconditioner()); } void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr preconditioner) { diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h index 5ed2c7c6d..35f6bdacb 100644 --- a/gtsam/linear/PCGSolver.h +++ b/gtsam/linear/PCGSolver.h @@ -34,26 +34,30 @@ struct 
PreconditionerParameters;
  * Parameters for PCG */
 struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
-public:
+ public:
   typedef ConjugateGradientParameters Base;
   typedef std::shared_ptr<PCGSolverParameters> shared_ptr;

-  PCGSolverParameters() {
-  }
+protected:
+  std::shared_ptr<PreconditionerParameters> preconditioner_;
+
+public:
+  PCGSolverParameters() {}
+
+  PCGSolverParameters(
+      const std::shared_ptr<PreconditionerParameters> &preconditioner)
+      : preconditioner_(preconditioner) {}

   void print(std::ostream &os) const override;

-  /* interface to preconditioner parameters */
-  inline const PreconditionerParameters& preconditioner() const {
-    return *preconditioner_;
+  const std::shared_ptr<PreconditionerParameters> preconditioner() const {
+    return preconditioner_;
   }

-  // needed for python wrapper
+  void setPreconditionerParams(
+      const std::shared_ptr<PreconditionerParameters> preconditioner);
+
   void print(const std::string &s) const;
-
-  std::shared_ptr<PreconditionerParameters> preconditioner_;
-
-  void setPreconditionerParams(const std::shared_ptr<PreconditionerParameters> preconditioner);
 };

 /**
diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp
index 7c8b07f37..830f1c719 100644
--- a/gtsam/sfm/ShonanAveraging.cpp
+++ b/gtsam/sfm/ShonanAveraging.cpp
@@ -67,20 +67,15 @@ ShonanAveragingParameters::ShonanAveragingParameters(
   builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON;
   builderParameters.augmentationFactor = 0.0;

-  auto pcg = std::make_shared<PCGSolverParameters>();
-
   // Choose optimization method
   if (method == "SUBGRAPH") {
     lm.iterativeParams = std::make_shared<SubgraphSolverParameters>(builderParameters);
   } else if (method == "SGPC") {
-    pcg->preconditioner_ =
-        std::make_shared<SubgraphPreconditionerParameters>(builderParameters);
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(
+        std::make_shared<SubgraphPreconditionerParameters>(builderParameters));
   } else if (method == "JACOBI") {
-    pcg->preconditioner_ =
-        std::make_shared<BlockJacobiPreconditionerParameters>();
-    lm.iterativeParams = pcg;
+    lm.iterativeParams = std::make_shared<PCGSolverParameters>(std::make_shared<BlockJacobiPreconditionerParameters>());
   } else if (method == "QR") {
     lm.setLinearSolverType("MULTIFRONTAL_QR");
   } else if (method == "CHOLESKY") {

From 1b422d5c98632598e726a9525520ee5046104522 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 06:54:00 -0400
Subject: [PATCH 117/204] rearrange GaussianFactorGraphSystem

---
 gtsam/linear/PCGSolver.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index 35f6bdacb..a6a16f722 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -91,16 +91,16 @@ public:
  * System class needed for calling preconditionedConjugateGradient
  */
 class GTSAM_EXPORT GaussianFactorGraphSystem {
-public:
+  GaussianFactorGraph gfg_;
+  Preconditioner preconditioner_;
+  KeyInfo keyInfo_;
+  std::map<Key, Vector> lambda_;
+ public:
   GaussianFactorGraphSystem(const GaussianFactorGraph &gfg,
-      const Preconditioner &preconditioner, const KeyInfo &info,
-      const std::map<Key, Vector> &lambda);
-
-  const GaussianFactorGraph &gfg_;
-  const Preconditioner &preconditioner_;
-  const KeyInfo &keyInfo_;
-  const std::map<Key, Vector> &lambda_;
+                            const Preconditioner &preconditioner,
+                            const KeyInfo &info,
+                            const std::map<Key, Vector> &lambda);

   void residual(const Vector &x, Vector &r) const;
   void multiply(const Vector &x, Vector& y) const;

From ec8714a54896ff60cb48d340998b19b8f0336456 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 08:40:20 -0400
Subject: [PATCH 118/204] restrict pointer access via methods

---
 tests/testPCGSolver.cpp      | 13 ++++++-------
 tests/testPreconditioner.cpp | 12 ++++++++----
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/tests/testPCGSolver.cpp b/tests/testPCGSolver.cpp
index bc9f9e608..039864886 100644
--- a/tests/testPCGSolver.cpp
+++ b/tests/testPCGSolver.cpp
@@ -125,8 +125,8 @@ TEST( GaussianFactorGraphSystem, multiply_getb)
 TEST(PCGSolver, dummy) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<DummyPreconditionerParameters>());
   params.iterativeParams = pcg;

   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -145,9 +145,8 @@ TEST(PCGSolver, dummy) {
 TEST(PCGSolver, blockjacobi) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ =
-      std::make_shared<BlockJacobiPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   params.iterativeParams = pcg;

   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
@@ -166,8 +165,8 @@ TEST(PCGSolver, blockjacobi) {
 TEST(PCGSolver, subgraph) {
   LevenbergMarquardtParams params;
   params.linearSolverType = LevenbergMarquardtParams::Iterative;
-  auto pcg = std::make_shared<PCGSolverParameters>();
-  pcg->preconditioner_ = std::make_shared<SubgraphPreconditionerParameters>();
+  auto pcg = std::make_shared<PCGSolverParameters>(
+      std::make_shared<SubgraphPreconditionerParameters>());
   params.iterativeParams = pcg;

   NonlinearFactorGraph fg = example::createReallyNonlinearFactorGraph();
diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp
index ecdf36322..9eff3e7f0 100644
--- a/tests/testPreconditioner.cpp
+++ b/tests/testPreconditioner.cpp
@@ -60,13 +60,15 @@ TEST( PCGsolver, verySimpleLinearSystem) {
   //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<DummyPreconditionerParameters>());
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7));
   //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   // It takes more than 1000 iterations for this test
   pcg->setMaxIterations(1500);
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5));
@@ -111,13 +113,15 @@ TEST(PCGSolver, simpleLinearSystem) {
   //pcg->setVerbosity("ERROR");

   // With Dummy preconditioner
-  pcg->preconditioner_ = std::make_shared<DummyPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<DummyPreconditionerParameters>());
   VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5));
   //deltaPCGDummy.print("PCG Dummy");

   // With Block-Jacobi preconditioner
-  pcg->preconditioner_ = std::make_shared<BlockJacobiPreconditionerParameters>();
+  pcg->setPreconditionerParams(
+      std::make_shared<BlockJacobiPreconditionerParameters>());
   VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG);
   EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5));
   //deltaPCGJacobi.print("PCG Jacobi");
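After this patch the preconditioner is supplied when the parameters are constructed. A minimal sketch of the resulting call site follows; the helper name and the Levenberg-Marquardt setup are illustrative, not part of GTSAM:

#include <memory>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/Preconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>

using namespace gtsam;

// Sketch only: build LM parameters that solve the linear systems with PCG,
// passing the preconditioner through the new PCGSolverParameters constructor
// instead of assigning the preconditioner_ member after the fact.
LevenbergMarquardtParams makeIterativeParams() {
  LevenbergMarquardtParams params;
  params.linearSolverType = LevenbergMarquardtParams::Iterative;
  params.iterativeParams = std::make_shared<PCGSolverParameters>(
      std::make_shared<DummyPreconditionerParameters>());
  return params;
}

The tests above exercise exactly this pattern with the dummy, block-Jacobi, and subgraph preconditioners.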
From a0c0cd1fced607d6c09c9ddb3f62e0c2f34ed7c3 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 09:14:47 -0400
Subject: [PATCH 119/204] improved docstrings

---
 gtsam/linear/ConjugateGradientSolver.h | 6 ++++--
 gtsam/linear/PCGSolver.h               | 6 +++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index b09b1a352..ccdab45da 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -24,20 +24,22 @@ namespace gtsam {

 /**
- * parameters for the conjugate gradient method
+ * Parameters for the Conjugate Gradient method
  */
 class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
-public:
+ public:
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;

+ protected:
   size_t minIterations_;  ///< minimum number of cg iterations
   size_t maxIterations_;  ///< maximum number of cg iterations
   size_t reset_;          ///< number of iterations before reset
   double epsilon_rel_;    ///< threshold for relative error decrease
   double epsilon_abs_;    ///< threshold for absolute error decrease

+ public:
   /* Matrix Operation Kernel */
   enum BLASKernel {
     GTSAM = 0, ///< Jacobian Factor Graph of GTSAM
diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index a6a16f722..cb671f288 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -31,7 +31,7 @@ class VectorValues;
 struct PreconditionerParameters;

 /**
- * Parameters for PCG
+ * Parameters for Preconditioned Conjugate Gradient solver.
  */
 struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters {
 public:
@@ -91,8 +91,8 @@ public:
  * System class needed for calling preconditionedConjugateGradient
  */
 class GTSAM_EXPORT GaussianFactorGraphSystem {
-  GaussianFactorGraph gfg_;
-  Preconditioner preconditioner_;
+  const GaussianFactorGraph &gfg_;
+  const Preconditioner &preconditioner_;
   KeyInfo keyInfo_;
   std::map<Key, Vector> lambda_;

From a94169a973e42d77c752d4d4bcf8f2ffd56d8693 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 09:59:16 -0400
Subject: [PATCH 120/204] remove unnecessary typedefs

---
 .../NonlinearConjugateGradientOptimizer.cpp | 24 ++++++++++---------
 .../NonlinearConjugateGradientOptimizer.h   | 21 ++++++++--------
 2 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 0b1a43815..403c72908 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -42,24 +42,26 @@ static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg,
 }

 NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
-    const NonlinearFactorGraph& graph, const Values& initialValues, const Parameters& params)
-    : Base(graph, std::unique_ptr<State>(new State(initialValues, graph.error(initialValues)))),
-      params_(params) {}
+    const NonlinearFactorGraph& graph, const Values& initialValues,
+    const Parameters& params)
+    : Base(graph, std::unique_ptr<State>(
+                      new State(initialValues, graph.error(initialValues)))),
+      params_(params) {}

-double NonlinearConjugateGradientOptimizer::System::error(const State& state) const {
+double NonlinearConjugateGradientOptimizer::System::error(
+    const Values& state) const {
   return graph_.error(state);
 }

-NonlinearConjugateGradientOptimizer::System::Gradient NonlinearConjugateGradientOptimizer::System::gradient(
-    const State &state) const {
+VectorValues NonlinearConjugateGradientOptimizer::System::gradient(
+    const Values& state) const {
   return gradientInPlace(graph_, state);
 }

-NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOptimizer::System::advance(
-    const State &current, const double alpha, const Gradient &g) const {
-  Gradient step = g;
-  step *= alpha;
-  return current.retract(step);
+Values NonlinearConjugateGradientOptimizer::System::advance(
+    const Values& current, const double alpha,
+    const VectorValues& gradient) const {
+  return current.retract(alpha * gradient);
 }

 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 3dd6c7e05..26f596b06 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -29,8 +29,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz
   /* a class for the nonlinearConjugateGradient template */
   class System {
   public:
-    typedef Values State;
-    typedef VectorValues Gradient;
     typedef NonlinearOptimizerParams Parameters;

   protected:
@@ -40,10 +38,10 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz
     System(const NonlinearFactorGraph &graph) :
         graph_(graph) {
     }
-    double error(const State &state) const;
-    Gradient gradient(const State &state) const;
-    State advance(const State &current, const double alpha,
-        const Gradient &g) const;
+    double error(const Values &state) const;
+    VectorValues gradient(const Values &state) const;
+    Values advance(const Values &current, const double alpha,
+                   const VectorValues &g) const;
   };

 public:
@@ -52,7 +50,7 @@ public:
   typedef NonlinearOptimizerParams Parameters;
   typedef std::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;

-protected:
+ protected:
   Parameters params_;

   const NonlinearOptimizerParams& _params() const override {
@@ -62,8 +60,9 @@ protected:

 public:
   /// Constructor
-  NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph& graph,
-      const Values& initialValues, const Parameters& params = Parameters());
+  NonlinearConjugateGradientOptimizer(
+      const NonlinearFactorGraph &graph, const Values &initialValues,
+      const Parameters &params = Parameters());

   /// Destructor
   ~NonlinearConjugateGradientOptimizer() override {
@@ -163,8 +162,8 @@ std::tuple<V, int> nonlinearConjugateGradient(const S &system,
   }

   V currentValues = initial;
-  typename S::Gradient currentGradient = system.gradient(currentValues),
-      prevGradient, direction = currentGradient;
+  VectorValues currentGradient = system.gradient(currentValues), prevGradient,
+      direction = currentGradient;

   /* do one step of gradient descent */
   V prevValues = currentValues;

From ba6e2b8d7f925cbd5ddcc3d6a77897cc3cd4dc1c Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 10:06:37 -0400
Subject: [PATCH 121/204] remove System inside
 NonlinearConjugateGradientOptimizer

---
 .../NonlinearConjugateGradientOptimizer.cpp |  19 +-
 .../NonlinearConjugateGradientOptimizer.h   | 284 +++++++++---------
 2 files changed, 145 insertions(+), 158 deletions(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 403c72908..40bd76a88 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -48,26 +48,27 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
                       new State(initialValues, graph.error(initialValues)))),
       params_(params) {}

-double NonlinearConjugateGradientOptimizer::System::error(
+double NonlinearConjugateGradientOptimizer::error(
     const Values& state) const {
   return graph_.error(state);
 }

-VectorValues NonlinearConjugateGradientOptimizer::System::gradient(
+VectorValues NonlinearConjugateGradientOptimizer::gradient(
     const Values& state) const {
   return gradientInPlace(graph_, state);
 }

-Values NonlinearConjugateGradientOptimizer::System::advance(
+Values NonlinearConjugateGradientOptimizer::advance(
     const Values& current, const double alpha,
     const VectorValues& gradient) const {
   return current.retract(alpha * gradient);
 }

 GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
-  const auto [newValues, dummy] = nonlinearConjugateGradient(
-      System(graph_), state_->values, params_, true /* single iteration */);
-  state_.reset(new State(newValues, graph_.error(newValues), state_->iterations + 1));
+  const auto [newValues, dummy] = nonlinearConjugateGradient<Values>(
+      state_->values, params_, true /* single iteration */);
+  state_.reset(
+      new State(newValues, graph_.error(newValues), state_->iterations + 1));

   // NOTE(frank): We don't linearize this system, so we must return null here.
   return nullptr;
@@ -75,10 +76,10 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {

 const Values& NonlinearConjugateGradientOptimizer::optimize() {
   // Optimize until convergence
-  System system(graph_);
   const auto [newValues, iterations] =
-      nonlinearConjugateGradient(system, state_->values, params_, false);
-  state_.reset(new State(std::move(newValues), graph_.error(newValues), iterations));
+      nonlinearConjugateGradient(state_->values, params_, false);
+  state_.reset(
+      new State(std::move(newValues), graph_.error(newValues), iterations));
   return state_->values;
 }

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 26f596b06..4a4eb22c5 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -24,28 +24,9 @@ namespace gtsam {

 /** An implementation of the nonlinear CG method using the template below */
-class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer {
-
-  /* a class for the nonlinearConjugateGradient template */
-  class System {
-  public:
-    typedef NonlinearOptimizerParams Parameters;
-
-  protected:
-    const NonlinearFactorGraph &graph_;
-
-  public:
-    System(const NonlinearFactorGraph &graph) :
-        graph_(graph) {
-    }
-    double error(const Values &state) const;
-    VectorValues gradient(const Values &state) const;
-    Values advance(const Values &current, const double alpha,
-                   const VectorValues &g) const;
-  };
-
-public:
-
+class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
+    : public NonlinearOptimizer {
+ public:
   typedef NonlinearOptimizer Base;
   typedef NonlinearOptimizerParams Parameters;
   typedef std::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;

@@ -53,20 +34,23 @@ public:
 protected:
   Parameters params_;

-  const NonlinearOptimizerParams& _params() const override {
-    return params_;
-  }
-
-public:
+  const NonlinearOptimizerParams &_params() const override { return params_; }

+ public:
   /// Constructor
-  NonlinearConjugateGradientOptimizer(
-      const NonlinearFactorGraph &graph, const Values &initialValues,
-      const Parameters &params = Parameters());
+  NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph &graph,
+                                      const Values &initialValues,
+                                      const Parameters &params = Parameters());

   /// Destructor
-  ~NonlinearConjugateGradientOptimizer() override {
-  }
+  ~NonlinearConjugateGradientOptimizer() override {}
+
+  double error(const Values &state) const;
+
+  VectorValues gradient(const Values &state) const;
+
+  Values advance(const Values &current, const double alpha,
+                 const VectorValues &g) const;

   /**
    * Perform a single iteration, returning GaussianFactorGraph corresponding to
@@ -79,145 +63,147 @@ public:
    * variable assignments.
    */
   const Values& optimize() override;
-};

-/** Implement the golden-section line search algorithm */
-template <class S, class V, class W>
-double lineSearch(const S &system, const V currentValues, const W &gradient) {
-
-  /* normalize it such that it becomes a unit vector */
-  const double g = gradient.norm();
-
-  // perform the golden section search algorithm to decide the the optimal step size
-  // detail refer to http://en.wikipedia.org/wiki/Golden_section_search
-  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, tau =
-      1e-5;
-  double minStep = -1.0 / g, maxStep = 0, newStep = minStep
-      + (maxStep - minStep) / (phi + 1.0);
-
-  V newValues = system.advance(currentValues, newStep, gradient);
-  double newError = system.error(newValues);
-
-  while (true) {
-    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-    const double testStep =
-        flag ? newStep + resphi * (maxStep - newStep) :
-        newStep - resphi * (newStep - minStep);
-
-    if ((maxStep - minStep)
-        < tau * (std::abs(testStep) + std::abs(newStep))) {
-      return 0.5 * (minStep + maxStep);
-    }
-
-    const V testValues = system.advance(currentValues, testStep, gradient);
-    const double testError = system.error(testValues);
-
-    // update the working range
-    if (testError >= newError) {
-      if (flag)
-        maxStep = testStep;
-      else
-        minStep = testStep;
-    } else {
-      if (flag) {
-        minStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      } else {
-        maxStep = newStep;
-        newStep = testStep;
-        newError = testError;
-      }
-    }
-  }
-  return 0.0;
-}
-
-/**
- * Implement the nonlinear conjugate gradient method using the Polak-Ribiere formula suggested in
- * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
- *
- * The S (system) class requires three member functions: error(state), gradient(state) and
- * advance(state, step-size, direction). The V class denotes the state or the solution.
- *
- * The last parameter is a switch between gradient-descent and conjugate gradient
- */
-template <class S, class V>
-std::tuple<V, int> nonlinearConjugateGradient(const S &system,
-    const V &initial, const NonlinearOptimizerParams &params,
-    const bool singleIteration, const bool gradientDescent = false) {
-
-  // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
-
-  size_t iteration = 0;
-
-  // check if we're already close enough
-  double currentError = system.error(initial);
-  if (currentError <= params.errorTol) {
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
-      std::cout << "Exiting, as error = " << currentError << " < "
-          << params.errorTol << std::endl;
-    }
-    return {initial, iteration};
-  }
-
-  V currentValues = initial;
-  VectorValues currentGradient = system.gradient(currentValues), prevGradient,
-      direction = currentGradient;
-
-  /* do one step of gradient descent */
-  V prevValues = currentValues;
-  double prevError = currentError;
-  double alpha = lineSearch(system, currentValues, direction);
-  currentValues = system.advance(prevValues, alpha, direction);
-  currentError = system.error(currentValues);
-
-  // Maybe show output
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-    std::cout << "Initial error: " << currentError << std::endl;
-
-  // Iterative loop
-  do {
-    if (gradientDescent == true) {
-      direction = system.gradient(currentValues);
-    } else {
-      prevGradient = currentGradient;
-      currentGradient = system.gradient(currentValues);
-      // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
-      const double beta = std::max(0.0,
-          currentGradient.dot(currentGradient - prevGradient)
-          / prevGradient.dot(prevGradient));
-      direction = currentGradient + (beta * direction);
-    }
-
-    alpha = lineSearch(system, currentValues, direction);
-
-    prevValues = currentValues;
-    prevError = currentError;
-
-    currentValues = system.advance(prevValues, alpha, direction);
-    currentError = system.error(currentValues);
-
-    // User hook:
-    if (params.iterationHook)
-      params.iterationHook(iteration, prevError, currentError);
-
-    // Maybe show output
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl;
-  } while (++iteration < params.maxIterations && !singleIteration
-      && !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-          params.errorTol, prevError, currentError, params.verbosity));
-
-  // Printing if verbose
-  if (params.verbosity >= NonlinearOptimizerParams::ERROR
-      && iteration >= params.maxIterations)
-    std::cout
-        << "nonlinearConjugateGradient: Terminating because reached maximum iterations"
-        << std::endl;
-
-  return {currentValues, iteration};
-}
+  /** Implement the golden-section line search algorithm */
+  template <typename V, typename W>
+  double lineSearch(const V currentValues, const W &gradient) {
+    /* normalize it such that it becomes a unit vector */
+    const double g = gradient.norm();
+
+    // perform the golden section search algorithm to decide the optimal
+    // step size; for details refer to
+    // http://en.wikipedia.org/wiki/Golden_section_search
+    const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+                 tau = 1e-5;
+    double minStep = -1.0 / g, maxStep = 0,
+           newStep = minStep + (maxStep - minStep) / (phi + 1.0);
+
+    V newValues = advance(currentValues, newStep, gradient);
+    double newError = error(newValues);
+
+    while (true) {
+      const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
+      const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                   : newStep - resphi * (newStep - minStep);
+
+      if ((maxStep - minStep) <
+          tau * (std::abs(testStep) + std::abs(newStep))) {
+        return 0.5 * (minStep + maxStep);
+      }
+
+      const V testValues = advance(currentValues, testStep, gradient);
+      const double testError = error(testValues);
+
+      // update the working range
+      if (testError >= newError) {
+        if (flag)
+          maxStep = testStep;
+        else
+          minStep = testStep;
+      } else {
+        if (flag) {
+          minStep = newStep;
+          newStep = testStep;
+          newError = testError;
+        } else {
+          maxStep = newStep;
+          newStep = testStep;
+          newError = testError;
+        }
+      }
+    }
+    return 0.0;
+  }
+
+  /**
+   * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
+   * formula suggested in
+   * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
+   *
+   * The V class
+   * denotes the state or the solution.
+   *
+   * The last parameter is a switch between gradient-descent and conjugate
+   * gradient
+   */
+  template <typename V>
+  std::tuple<V, int> nonlinearConjugateGradient(
+      const V &initial, const NonlinearOptimizerParams &params,
+      const bool singleIteration, const bool gradientDescent = false) {
+    // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
+
+    size_t iteration = 0;
+
+    // check if we're already close enough
+    double currentError = error(initial);
+    if (currentError <= params.errorTol) {
+      if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
+        std::cout << "Exiting, as error = " << currentError << " < "
+                  << params.errorTol << std::endl;
+      }
+      return {initial, iteration};
+    }
+
+    V currentValues = initial;
+    VectorValues currentGradient = gradient(currentValues), prevGradient,
+        direction = currentGradient;
+
+    /* do one step of gradient descent */
+    V prevValues = currentValues;
+    double prevError = currentError;
+    double alpha = lineSearch(currentValues, direction);
+    currentValues = advance(prevValues, alpha, direction);
+    currentError = error(currentValues);
+
+    // Maybe show output
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      std::cout << "Initial error: " << currentError << std::endl;
+
+    // Iterative loop
+    do {
+      if (gradientDescent == true) {
+        direction = gradient(currentValues);
+      } else {
+        prevGradient = currentGradient;
+        currentGradient = gradient(currentValues);
+        // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
+        const double beta =
+            std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                              prevGradient.dot(prevGradient));
+        direction = currentGradient + (beta * direction);
+      }
+
+      alpha = lineSearch(currentValues, direction);
+
+      prevValues = currentValues;
+      prevError = currentError;
+
+      currentValues = advance(prevValues, alpha, direction);
+      currentError = error(currentValues);
+
+      // User hook:
+      if (params.iterationHook)
+        params.iterationHook(iteration, prevError, currentError);
+
+      // Maybe show output
+      if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+        std::cout << "iteration: " << iteration
+                  << ", currentError: " << currentError << std::endl;
+    } while (++iteration < params.maxIterations && !singleIteration &&
+             !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                               params.errorTol, prevError, currentError,
+                               params.verbosity));
+
+    // Printing if verbose
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+        iteration >= params.maxIterations)
+      std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                   "maximum iterations"
+                << std::endl;
+
+    return {currentValues, iteration};
+  }
+};

 } // \ namespace gtsam

From 1de138678f800ca2ceb641d6521ae20089864f45 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 12:29:07 -0400
Subject: [PATCH 122/204] remove templates

---
 .../NonlinearConjugateGradientOptimizer.cpp |  2 +-
 .../NonlinearConjugateGradientOptimizer.h   | 36 ++++++++-----------
 2 files changed, 15 insertions(+), 23 deletions(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 40bd76a88..6190b041e 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -65,7 +65,7 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() {
-  const auto [newValues, dummy] = nonlinearConjugateGradient<Values>(
+  const auto [newValues, dummy] = nonlinearConjugateGradient(
       state_->values, params_, true /* single iteration */);
   state_.reset(
       new State(newValues, graph_.error(newValues), state_->iterations + 1));
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 4a4eb22c5..28de45894 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -52,21 +52,20 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
   Values advance(const Values &current, const double alpha,
                  const VectorValues &g) const;

-  /**
-   * Perform a single iteration, returning GaussianFactorGraph corresponding to
+  /**
+   * Perform a single iteration, returning GaussianFactorGraph corresponding to
    * the linearized factor graph.
    */
   GaussianFactorGraph::shared_ptr iterate() override;

-  /**
-   * Optimize for the maximum-likelihood estimate, returning a the optimized
+  /**
+   * Optimize for the maximum-likelihood estimate, returning the optimized
    * variable assignments.
    */
-  const Values& optimize() override;
+  const Values &optimize() override;

   /** Implement the golden-section line search algorithm */
-  template <typename V, typename W>
-  double lineSearch(const V currentValues, const W &gradient) {
+  double lineSearch(const Values &currentValues, const VectorValues &gradient) const {
     /* normalize it such that it becomes a unit vector */
     const double g = gradient.norm();

@@ -78,7 +77,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
     double minStep = -1.0 / g, maxStep = 0,
            newStep = minStep + (maxStep - minStep) / (phi + 1.0);

-    V newValues = advance(currentValues, newStep, gradient);
+    Values newValues = advance(currentValues, newStep, gradient);
     double newError = error(newValues);

     while (true) {
@@ -91,7 +90,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
       return 0.5 * (minStep + maxStep);
     }

-      const V testValues = advance(currentValues, testStep, gradient);
+      const Values testValues = advance(currentValues, testStep, gradient);
       const double testError = error(testValues);

       // update the working range
@@ -120,18 +119,12 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
    * formula suggested in
    * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method.
    *
-   * The V class
-   * denotes the state or the solution.
-   *
    * The last parameter is a switch between gradient-descent and conjugate
    * gradient
    */
-  template <typename V>
-  std::tuple<V, int> nonlinearConjugateGradient(
-      const V &initial, const NonlinearOptimizerParams &params,
-      const bool singleIteration, const bool gradientDescent = false) {
+  std::tuple<Values, int> nonlinearConjugateGradient(
+      const Values &initial, const NonlinearOptimizerParams &params,
+      const bool singleIteration, const bool gradientDescent = false) const {
     size_t iteration = 0;

     // check if we're already close enough
@@ -144,12 +137,12 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
       return {initial, iteration};
     }

-    V currentValues = initial;
+    Values currentValues = initial;
     VectorValues currentGradient = gradient(currentValues), prevGradient,
         direction = currentGradient;

     /* do one step of gradient descent */
-    V prevValues = currentValues;
+    Values prevValues = currentValues;
     double prevError = currentError;
     double alpha = lineSearch(currentValues, direction);
     currentValues = advance(prevValues, alpha, direction);
     currentError = error(currentValues);
@@ -205,5 +198,4 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
   }
 };

-} // \ namespace gtsam
-
+}  // namespace gtsam
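With the System helper gone and the templates removed, the optimizer is driven directly. A minimal usage sketch — the helper function and parameter value are illustrative, the graph and initial values are assumed to come from elsewhere:

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

using namespace gtsam;

// Sketch only: run nonlinear CG to convergence on an existing factor graph.
Values optimizeWithNonlinearCG(const NonlinearFactorGraph& graph,
                               const Values& initial) {
  NonlinearOptimizerParams params;
  params.maxIterations = 100;  // illustrative bound
  NonlinearConjugateGradientOptimizer optimizer(graph, initial, params);
  return optimizer.optimize();
}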
From 69729d603beb76e300a9ea40d9109af456b1a5f5 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 12:35:11 -0400
Subject: [PATCH 123/204] move all implementation to cpp file

---
 .../NonlinearConjugateGradientOptimizer.cpp | 139 +++++++++++++++++-
 .../NonlinearConjugateGradientOptimizer.h   | 123 +---------------
 2 files changed, 134 insertions(+), 128 deletions(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 6190b041e..9fce1776b 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -16,11 +16,11 @@
  * @date Jun 11, 2012
  */

-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include
 #include

@@ -34,8 +34,8 @@ typedef internal::NonlinearOptimizerState State;
  * @param values a linearization point
  * Can be moved to NonlinearFactorGraph.h if desired
  */
-static VectorValues gradientInPlace(const NonlinearFactorGraph &nfg,
-    const Values &values) {
+static VectorValues gradientInPlace(const NonlinearFactorGraph& nfg,
+                                    const Values& values) {
   // Linearize graph
   GaussianFactorGraph::shared_ptr linear = nfg.linearize(values);
   return linear->gradientAtZero();
@@ -48,8 +48,7 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
                       new State(initialValues, graph.error(initialValues)))),
       params_(params) {}

-double NonlinearConjugateGradientOptimizer::error(
-    const Values& state) const {
+double NonlinearConjugateGradientOptimizer::error(const Values& state) const {
   return graph_.error(state);
 }

@@ -83,5 +82,129 @@ const Values& NonlinearConjugateGradientOptimizer::optimize() {
   return state_->values;
 }

-} /* namespace gtsam */
+double NonlinearConjugateGradientOptimizer::lineSearch(
+    const Values& currentValues, const VectorValues& gradient) const {
+  /* normalize it such that it becomes a unit vector */
+  const double g = gradient.norm();

+  // perform the golden section search algorithm to decide the optimal
+  // step size; for details refer to
+  // http://en.wikipedia.org/wiki/Golden_section_search
+  const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
+               tau = 1e-5;
+  double minStep = -1.0 / g, maxStep = 0,
+         newStep = minStep + (maxStep - minStep) / (phi + 1.0);
+
+  Values newValues = advance(currentValues, newStep, gradient);
+  double newError = error(newValues);
+
+  while (true) {
+    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
+    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
+                                 : newStep - resphi * (newStep - minStep);
+
+    if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) {
+      return 0.5 * (minStep + maxStep);
+    }
+
+    const Values testValues = advance(currentValues, testStep, gradient);
+    const double testError = error(testValues);
+
+    // update the working range
+    if (testError >= newError) {
+      if (flag)
+        maxStep = testStep;
+      else
+        minStep = testStep;
+    } else {
+      if (flag) {
+        minStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      } else {
+        maxStep = newStep;
+        newStep = testStep;
+        newError = testError;
+      }
+    }
+  }
+  return 0.0;
+}
+
+std::tuple<Values, int>
+NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient(
+    const Values& initial, const NonlinearOptimizerParams& params,
+    const bool singleIteration, const bool gradientDescent) const {
+  size_t iteration = 0;
+
+  // check if we're already close enough
+  double currentError = error(initial);
+  if (currentError <= params.errorTol) {
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
+      std::cout << "Exiting, as error = " << currentError << " < "
+                << params.errorTol << std::endl;
+    }
+    return {initial, iteration};
+  }
+
+  Values currentValues = initial;
+  VectorValues currentGradient = gradient(currentValues), prevGradient,
+      direction = currentGradient;
+
+  /* do one step of gradient descent */
+  Values prevValues = currentValues;
+  double prevError = currentError;
+  double alpha = lineSearch(currentValues, direction);
+  currentValues = advance(prevValues, alpha, direction);
+  currentError = error(currentValues);
+
+  // Maybe show output
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+    std::cout << "Initial error: " << currentError << std::endl;
+
+  // Iterative loop
+  do {
+    if (gradientDescent == true) {
+      direction = gradient(currentValues);
+    } else {
+      prevGradient = currentGradient;
+      currentGradient = gradient(currentValues);
+      // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
+      const double beta =
+          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                            prevGradient.dot(prevGradient));
+      direction = currentGradient + (beta * direction);
+    }
+
+    alpha = lineSearch(currentValues, direction);
+
+    prevValues = currentValues;
+    prevError = currentError;
+
+    currentValues = advance(prevValues, alpha, direction);
+    currentError = error(currentValues);
+
+    // User hook:
+    if (params.iterationHook)
+      params.iterationHook(iteration, prevError, currentError);
+
+    // Maybe show output
+    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
+      std::cout << "iteration: " << iteration
+                << ", currentError: " << currentError << std::endl;
+  } while (++iteration < params.maxIterations && !singleIteration &&
+           !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
+                             params.errorTol, prevError, currentError,
+                             params.verbosity));
+
+  // Printing if verbose
+  if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
+      iteration >= params.maxIterations)
+    std::cout << "nonlinearConjugateGradient: Terminating because reached "
+                 "maximum iterations"
+              << std::endl;
+
+  return {currentValues, iteration};
+}
+
+} /* namespace gtsam */
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 28de45894..cdc0634d6 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -65,54 +65,8 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
   const Values &optimize() override;

   /** Implement the golden-section line search algorithm */
-  double lineSearch(const Values &currentValues, const VectorValues &gradient) const {
-    /* normalize it such that it becomes a unit vector */
-    const double g = gradient.norm();
-
-    // perform the golden section search algorithm to decide the optimal
-    // step size; for details refer to
-    // http://en.wikipedia.org/wiki/Golden_section_search
-    const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi,
-                 tau = 1e-5;
-    double minStep = -1.0 / g, maxStep = 0,
-           newStep = minStep + (maxStep - minStep) / (phi + 1.0);
-
-    Values newValues = advance(currentValues, newStep, gradient);
-    double newError = error(newValues);
-
-    while (true) {
-      const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
-      const double testStep = flag ? newStep + resphi * (maxStep - newStep)
-                                   : newStep - resphi * (newStep - minStep);
-
-      if ((maxStep - minStep) <
-          tau * (std::abs(testStep) + std::abs(newStep))) {
-        return 0.5 * (minStep + maxStep);
-      }
-
-      const Values testValues = advance(currentValues, testStep, gradient);
-      const double testError = error(testValues);
-
-      // update the working range
-      if (testError >= newError) {
-        if (flag)
-          maxStep = testStep;
-        else
-          minStep = testStep;
-      } else {
-        if (flag) {
-          minStep = newStep;
-          newStep = testStep;
-          newError = testError;
-        } else {
-          maxStep = newStep;
-          newStep = testStep;
-          newError = testError;
-        }
-      }
-    }
-    return 0.0;
-  }
+  double lineSearch(const Values &currentValues,
+                    const VectorValues &gradient) const;

   /**
    * Implement the nonlinear conjugate gradient method using the Polak-Ribiere
@@ -124,78 +78,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
    */
   std::tuple<Values, int> nonlinearConjugateGradient(
       const Values &initial, const NonlinearOptimizerParams &params,
-      const bool singleIteration, const bool gradientDescent = false) const {
-    size_t iteration = 0;
-
-    // check if we're already close enough
-    double currentError = error(initial);
-    if (currentError <= params.errorTol) {
-      if (params.verbosity >= NonlinearOptimizerParams::ERROR) {
-        std::cout << "Exiting, as error = " << currentError << " < "
-                  << params.errorTol << std::endl;
-      }
-      return {initial, iteration};
-    }
-
-    Values currentValues = initial;
-    VectorValues currentGradient = gradient(currentValues), prevGradient,
-        direction = currentGradient;
-
-    /* do one step of gradient descent */
-    Values prevValues = currentValues;
-    double prevError = currentError;
-    double alpha = lineSearch(currentValues, direction);
-    currentValues = advance(prevValues, alpha, direction);
-    currentError = error(currentValues);
-
-    // Maybe show output
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-      std::cout << "Initial error: " << currentError << std::endl;
-
-    // Iterative loop
-    do {
-      if (gradientDescent == true) {
-        direction = gradient(currentValues);
-      } else {
-        prevGradient = currentGradient;
-        currentGradient = gradient(currentValues);
-        // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
-        const double beta =
-            std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
-                              prevGradient.dot(prevGradient));
-        direction = currentGradient + (beta * direction);
-      }
-
-      alpha = lineSearch(currentValues, direction);
-
-      prevValues = currentValues;
-      prevError = currentError;
-
-      currentValues = advance(prevValues, alpha, direction);
-      currentError = error(currentValues);
-
-      // User hook:
-      if (params.iterationHook)
-        params.iterationHook(iteration, prevError, currentError);
-
-      // Maybe show output
-      if (params.verbosity >= NonlinearOptimizerParams::ERROR)
-        std::cout << "iteration: " << iteration
-                  << ", currentError: " << currentError << std::endl;
-    } while (++iteration < params.maxIterations && !singleIteration &&
-             !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol,
-                               params.errorTol, prevError, currentError,
-                               params.verbosity));
-
-    // Printing if verbose
-    if (params.verbosity >= NonlinearOptimizerParams::ERROR &&
-        iteration >= params.maxIterations)
-      std::cout << "nonlinearConjugateGradient: Terminating because reached "
-                   "maximum iterations"
-                << std::endl;
-
-    return {currentValues, iteration};
-  }
+      const bool singleIteration, const bool gradientDescent = false) const;
 };

 }  // namespace gtsam
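The golden-section routine moved into the cpp file above is self-contained bracketing logic. As an aside, the same scheme can be exercised on a plain 1-D function; the sketch below is illustrative only and not part of GTSAM:

#include <cmath>
#include <functional>

// Illustrative golden-section minimization of a scalar function, mirroring
// the bracketing pattern used by lineSearch: each iteration shrinks the
// interval [lo, hi] by a factor of 1/phi while reusing one interior sample.
double goldenSection(const std::function<double(double)>& f, double lo,
                     double hi, double tol = 1e-8) {
  const double phi = 0.5 * (1.0 + std::sqrt(5.0));  // golden ratio
  const double resphi = 2.0 - phi;                  // ~0.382
  double x1 = lo + resphi * (hi - lo), x2 = hi - resphi * (hi - lo);
  double f1 = f(x1), f2 = f(x2);
  while (hi - lo > tol) {
    if (f1 < f2) {  // minimum lies in [lo, x2]
      hi = x2; x2 = x1; f2 = f1;
      x1 = lo + resphi * (hi - lo);
      f1 = f(x1);
    } else {        // minimum lies in [x1, hi]
      lo = x1; x1 = x2; f1 = f2;
      x2 = hi - resphi * (hi - lo);
      f2 = f(x2);
    }
  }
  return 0.5 * (lo + hi);
}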
From 8fd26a341b54f1a6728e6d5afdc5221118bae615 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 15:14:33 -0400
Subject: [PATCH 124/204] add additional direction methods beyond Polak-Ribiere

---
 .../NonlinearConjugateGradientOptimizer.cpp | 62 +++++++++++++++++--
 .../NonlinearConjugateGradientOptimizer.h   | 15 ++++-
 2 files changed, 69 insertions(+), 8 deletions(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index 9fce1776b..14d2a705b 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -28,6 +28,46 @@ namespace gtsam {

 typedef internal::NonlinearOptimizerState State;

+/// Fletcher-Reeves formula for computing β, the coefficient that weights the
+/// previous search direction.
+static double FletcherReeves(const VectorValues& currentGradient,
+                             const VectorValues& prevGradient) {
+  // Fletcher-Reeves: beta = g_n'*g_n/g_n-1'*g_n-1
+  const double beta = std::max(0.0, currentGradient.dot(currentGradient) /
+                                        prevGradient.dot(prevGradient));
+  return beta;
+}
+
+/// Polak-Ribiere formula for computing β, the coefficient that weights the
+/// previous search direction.
+static double PolakRibiere(const VectorValues& currentGradient,
+                           const VectorValues& prevGradient) {
+  // Polak-Ribiere: beta = g_n'*(g_n-g_n-1)/g_n-1'*g_n-1
+  const double beta =
+      std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                        prevGradient.dot(prevGradient));
+  return beta;
+}
+
+/// The Hestenes-Stiefel formula for computing β, the coefficient that weights
+/// the previous search direction.
+static double HestenesStiefel(const VectorValues& currentGradient,
+                              const VectorValues& prevGradient,
+                              const VectorValues& direction) {
+  // Hestenes-Stiefel: beta = g_n'*(g_n-g_n-1)/(-s_n-1')*(g_n-g_n-1)
+  VectorValues d = currentGradient - prevGradient;
+  const double beta = std::max(0.0, currentGradient.dot(d) / -direction.dot(d));
+  return beta;
+}
+
+/// The Dai-Yuan formula for computing β, the coefficient that weights the
+/// previous search direction.
+static double DaiYuan(const VectorValues& currentGradient,
+                      const VectorValues& prevGradient,
+                      const VectorValues& direction) {
+  // Dai-Yuan: beta = g_n'*g_n/(-s_n-1')*(g_n-g_n-1)
+  const double beta =
+      std::max(0.0, currentGradient.dot(currentGradient) /
+                        -direction.dot(currentGradient - prevGradient));
+  return beta;
+}
+
 /**
  * @brief Return the gradient vector of a nonlinear factor graph
  * @param nfg the graph
@@ -43,7 +83,7 @@ static VectorValues gradientInPlace(const NonlinearFactorGraph& nfg,

 NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer(
     const NonlinearFactorGraph& graph, const Values& initialValues,
-    const Parameters& params)
+    const Parameters& params, const DirectionMethod& directionMethod)
     : Base(graph, std::unique_ptr<State>(
                       new State(initialValues, graph.error(initialValues)))),
       params_(params) {}
@@ -169,10 +209,22 @@ NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient(
     } else {
       prevGradient = currentGradient;
       currentGradient = gradient(currentValues);
-      // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1
-      const double beta =
-          std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
-                            prevGradient.dot(prevGradient));
+      double beta;
+
+      switch (directionMethod_) {
+        case DirectionMethod::FletcherReeves:
+          beta = FletcherReeves(currentGradient, prevGradient);
+          break;
+        case DirectionMethod::PolakRibiere:
+          beta = PolakRibiere(currentGradient, prevGradient);
+          break;
+        case DirectionMethod::HestenesStiefel:
+          beta = HestenesStiefel(currentGradient, prevGradient, direction);
+          break;
+        case DirectionMethod::DaiYuan:
+          beta = DaiYuan(currentGradient, prevGradient, direction);
+          break;
+      }
       direction = currentGradient + (beta * direction);
     }

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index cdc0634d6..f9cd22361 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -31,16 +31,25 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer
   typedef NonlinearOptimizerParams Parameters;
   typedef std::shared_ptr<NonlinearConjugateGradientOptimizer> shared_ptr;

+  enum class DirectionMethod {
+    FletcherReeves,
+    PolakRibiere,
+    HestenesStiefel,
+    DaiYuan
+  };
+
  protected:
   Parameters params_;
+  DirectionMethod directionMethod_;

   const NonlinearOptimizerParams &_params() const override { return params_; }

  public:
   /// Constructor
-  NonlinearConjugateGradientOptimizer(const NonlinearFactorGraph &graph,
-                                      const Values &initialValues,
-                                      const Parameters &params = Parameters());
+  NonlinearConjugateGradientOptimizer(
+      const NonlinearFactorGraph &graph, const Values &initialValues,
+      const Parameters &params = Parameters(),
+      const DirectionMethod &directionMethod = DirectionMethod::PolakRibiere);

   /// Destructor
   ~NonlinearConjugateGradientOptimizer() override {}
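The four update rules differ only in how β is computed from the current gradient g_n, the previous gradient g_n-1, and the previous direction s_n-1. A sketch of how a caller would request a specific rule through the new constructor parameter — the helper name is illustrative, not part of GTSAM:

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

using namespace gtsam;

// Sketch only: select the Hestenes-Stiefel direction update when
// constructing the optimizer; graph and initial values are assumed given.
Values optimizeHestenesStiefel(const NonlinearFactorGraph& graph,
                               const Values& initial) {
  NonlinearOptimizerParams params;
  NonlinearConjugateGradientOptimizer optimizer(
      graph, initial, params,
      NonlinearConjugateGradientOptimizer::DirectionMethod::HestenesStiefel);
  return optimizer.optimize();
}

Omitting the fourth argument keeps the Polak-Ribiere default.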
From bc27d9fc96b0ba4e663331a17f1445b87ca0159b Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 17:07:56 -0400
Subject: [PATCH 125/204] set default direction method

---
 gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index f9cd22361..63ec4f6ea 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -40,7 +40,7 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer

  protected:
   Parameters params_;
-  DirectionMethod directionMethod_;
+  DirectionMethod directionMethod_ = DirectionMethod::PolakRibiere;

   const NonlinearOptimizerParams &_params() const override { return params_; }

From df1c008955cd90f7fe2ec79e5e71d4fc78072e6d Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Tue, 15 Oct 2024 23:31:29 -0400
Subject: [PATCH 126/204] make ConjugateGradientParameters a public struct

---
 gtsam/linear/ConjugateGradientSolver.cpp | 14 ++---
 gtsam/linear/ConjugateGradientSolver.h   | 76 ++++++++++++++----------
 gtsam/linear/iterative-inl.h             | 17 +++---
 3 files changed, 61 insertions(+), 46 deletions(-)

diff --git a/gtsam/linear/ConjugateGradientSolver.cpp b/gtsam/linear/ConjugateGradientSolver.cpp
index 006e7d67d..b2725ea1e 100644
--- a/gtsam/linear/ConjugateGradientSolver.cpp
+++ b/gtsam/linear/ConjugateGradientSolver.cpp
@@ -26,13 +26,13 @@ namespace gtsam {

 /*****************************************************************************/
 void ConjugateGradientParameters::print(ostream &os) const {
-  Base::print(os);
-  cout << "ConjugateGradientParameters" << endl
-       << "minIter: " << minIterations_ << endl
-       << "maxIter: " << maxIterations_ << endl
-       << "resetIter: " << reset_ << endl
-       << "eps_rel: " << epsilon_rel_ << endl
-       << "eps_abs: " << epsilon_abs_ << endl;
+  Base::print(os);
+  cout << "ConjugateGradientParameters" << endl
+       << "minIter: " << minIterations << endl
+       << "maxIter: " << maxIterations << endl
+       << "resetIter: " << reset << endl
+       << "eps_rel: " << epsilon_rel << endl
+       << "eps_abs: " << epsilon_abs << endl;
 }

 /*****************************************************************************/
diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h
index ccdab45da..ab804e3f4 100644
--- a/gtsam/linear/ConjugateGradientSolver.h
+++ b/gtsam/linear/ConjugateGradientSolver.h
@@ -26,38 +26,50 @@ namespace gtsam {
 /**
  * Parameters for the Conjugate Gradient method
  */
-class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationParameters {
-
- public:
+struct GTSAM_EXPORT ConjugateGradientParameters
+    : public IterativeOptimizationParameters {
   typedef IterativeOptimizationParameters Base;
   typedef std::shared_ptr<ConjugateGradientParameters> shared_ptr;

- protected:
-  size_t minIterations_;  ///< minimum number of cg iterations
-  size_t maxIterations_;  ///< maximum number of cg iterations
-  size_t reset_;          ///< number of iterations before reset
-  double epsilon_rel_;    ///< threshold for relative error decrease
-  double epsilon_abs_;    ///< threshold for absolute error decrease
+  size_t minIterations;  ///< minimum number of cg iterations
+  size_t maxIterations;  ///< maximum number of cg iterations
+  size_t reset;          ///< number of iterations before reset
+  double epsilon_rel;    ///< threshold for relative error decrease
+  double epsilon_abs;    ///< threshold for absolute error decrease

- public:
   /* Matrix Operation Kernel */
   enum BLASKernel {
-    GTSAM = 0, ///< Jacobian Factor Graph of GTSAM
-  } blas_kernel_ ;
+    GTSAM = 0,  ///< Jacobian Factor Graph of GTSAM
+  } blas_kernel;

   ConjugateGradientParameters()
-    : minIterations_(1), maxIterations_(500), reset_(501), epsilon_rel_(1e-3),
-      epsilon_abs_(1e-3), blas_kernel_(GTSAM) {}
+      : minIterations(1),
+        maxIterations(500),
+        reset(501),
+        epsilon_rel(1e-3),
+        epsilon_abs(1e-3),
+        blas_kernel(GTSAM) {}

-  ConjugateGradientParameters(size_t minIterations, size_t maxIterations, size_t reset,
-      double epsilon_rel, double epsilon_abs, BLASKernel blas)
-    : minIterations_(minIterations), maxIterations_(maxIterations), reset_(reset),
-      epsilon_rel_(epsilon_rel), epsilon_abs_(epsilon_abs), blas_kernel_(blas) {}
+  ConjugateGradientParameters(size_t minIterations, size_t maxIterations,
+                              size_t reset, double epsilon_rel,
+                              double epsilon_abs, BLASKernel blas)
+      : minIterations(minIterations),
+        maxIterations(maxIterations),
+        reset(reset),
+        epsilon_rel(epsilon_rel),
+        epsilon_abs(epsilon_abs),
+        blas_kernel(blas) {}

   ConjugateGradientParameters(const ConjugateGradientParameters &p)
-    : Base(p), minIterations_(p.minIterations_), maxIterations_(p.maxIterations_), reset_(p.reset_),
-      epsilon_rel_(p.epsilon_rel_), epsilon_abs_(p.epsilon_abs_), blas_kernel_(GTSAM) {}
+      : Base(p),
+        minIterations(p.minIterations),
+        maxIterations(p.maxIterations),
+        reset(p.reset),
+        epsilon_rel(p.epsilon_rel),
+        epsilon_abs(p.epsilon_abs),
+        blas_kernel(GTSAM) {}

+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43
   /* general interface */
   inline size_t minIterations() const { return minIterations_; }
   inline size_t maxIterations() const { return maxIterations_; }
   inline size_t reset() const { return reset_; }
   inline double epsilon() const { return epsilon_rel_; }
   inline double epsilon_rel() const { return epsilon_rel_; }
   inline double epsilon_abs() const { return epsilon_abs_; }

   inline size_t getMinIterations() const { return minIterations_; }
   inline size_t getMaxIterations() const { return maxIterations_; }
   inline size_t getReset() const { return reset_; }
   inline double getEpsilon() const { return epsilon_rel_; }
   inline double getEpsilon_rel() const { return epsilon_rel_; }
   inline double getEpsilon_abs() const { return epsilon_abs_; }
@@ -79,6 +91,7 @@ struct GTSAM_EXPORT ConjugateGradientParameters
   inline void setEpsilon(double value) { epsilon_rel_ = value; }
   inline void setEpsilon_rel(double value) { epsilon_rel_ = value; }
   inline void setEpsilon_abs(double value) { epsilon_abs_ = value; }
+#endif

   void print() const { Base::print(); }
@@ -111,18 +124,19 @@ V preconditionedConjugateGradient(const S &system, const V &initial,

   double currentGamma = system.dot(residual, residual), prevGamma, alpha, beta;

-  const size_t iMaxIterations = parameters.maxIterations(),
-               iMinIterations = parameters.minIterations(),
-               iReset = parameters.reset() ;
-  const double threshold = std::max(parameters.epsilon_abs(),
-      parameters.epsilon() * parameters.epsilon() * currentGamma);
+  const size_t iMaxIterations = parameters.maxIterations,
+               iMinIterations = parameters.minIterations,
+               iReset = parameters.reset;
+  const double threshold =
+      std::max(parameters.epsilon_abs,
+               parameters.epsilon_rel * parameters.epsilon_rel * currentGamma);

-  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY )
-    std::cout << "[PCG] epsilon = " << parameters.epsilon()
-              << ", max = " << parameters.maxIterations()
-              << ", reset = " << parameters.reset()
-              << ", ||r0||^2 = " << currentGamma
-              << ", threshold = " << threshold << std::endl;
+  if (parameters.verbosity() >= ConjugateGradientParameters::COMPLEXITY)
+    std::cout << "[PCG] epsilon = " << parameters.epsilon_rel
+              << ", max = " << parameters.maxIterations
+              << ", reset = " << parameters.reset
+              << ", ||r0||^2 = " << currentGamma
+              << ", threshold = " << threshold << std::endl;

   size_t k;
   for ( k = 1 ; k <= iMaxIterations && (currentGamma > threshold || k <= iMinIterations) ; k++ ) {
diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
index c96d643b2..a8185b544 100644
--- a/gtsam/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -49,10 +49,12 @@ namespace gtsam {

       // init gamma and calculate threshold
       gamma = dot(g,g);
-      threshold = std::max(parameters_.epsilon_abs(), parameters_.epsilon() * parameters_.epsilon() * gamma);
+      threshold =
+          std::max(parameters_.epsilon_abs,
+                   parameters_.epsilon_rel * parameters_.epsilon_rel * gamma);

       // Allocate and calculate A*d for first iteration
-      if (gamma > parameters_.epsilon_abs()) Ad = Ab * d;
+      if (gamma > parameters_.epsilon_abs) Ad = Ab * d;
     }

 /* ************************************************************************* */
@@ -79,13 +81,13 @@

     // take a step, return true if converged
     bool step(const S& Ab, V& x) {
-      if ((++k) >= ((int)parameters_.maxIterations())) return true;
+      if ((++k) >= ((int)parameters_.maxIterations)) return true;

       //---------------------------------->
       double alpha = takeOptimalStep(x);

       // update gradient (or re-calculate at reset time)
-      if (k % parameters_.reset() == 0) g = Ab.gradient(x);
+      if (k % parameters_.reset == 0) g = Ab.gradient(x);
       // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
       else Ab.transposeMultiplyAdd(alpha, Ad, g);
@@ -126,11 +128,10 @@

     CGState state(Ab, x, parameters, steepest);

     if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
-      std::cout << "CG: epsilon = " << parameters.epsilon()
-                << ", maxIterations = " << parameters.maxIterations()
+      std::cout << "CG: epsilon = " << parameters.epsilon_rel
+                << ", maxIterations = " << parameters.maxIterations
                 << ", ||g0||^2 = " << state.gamma
-                << ", threshold = " << state.threshold
-                << std::endl;
+                << ", threshold = " << state.threshold << std::endl;

     if ( state.gamma < state.threshold ) {
       if (parameters.verbosity() != ConjugateGradientParameters::SILENT)
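With the fields now public, call sites set them directly rather than through setters. A minimal sketch of the resulting usage — the helper name and values are illustrative:

#include <gtsam/linear/ConjugateGradientSolver.h>

using namespace gtsam;

// Sketch only: configure CG tolerances by assigning the public fields.
ConjugateGradientParameters makeCGParams() {
  ConjugateGradientParameters p;
  p.minIterations = 1;
  p.maxIterations = 500;
  p.epsilon_rel = 1e-5;
  p.epsilon_abs = 1e-3;
  return p;
}

The old accessor/mutator methods remain available behind GTSAM_ALLOW_DEPRECATED_SINCE_V43, and the test updates further below switch to the field syntax.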
converged bool step(const S& Ab, V& x) { - if ((++k) >= ((int)parameters_.maxIterations())) return true; + if ((++k) >= ((int)parameters_.maxIterations)) return true; //----------------------------------> double alpha = takeOptimalStep(x); // update gradient (or re-calculate at reset time) - if (k % parameters_.reset() == 0) g = Ab.gradient(x); + if (k % parameters_.reset == 0) g = Ab.gradient(x); // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad) else Ab.transposeMultiplyAdd(alpha, Ad, g); @@ -126,11 +128,10 @@ namespace gtsam { CGState state(Ab, x, parameters, steepest); if (parameters.verbosity() != ConjugateGradientParameters::SILENT) - std::cout << "CG: epsilon = " << parameters.epsilon() - << ", maxIterations = " << parameters.maxIterations() + std::cout << "CG: epsilon = " << parameters.epsilon_rel + << ", maxIterations = " << parameters.maxIterations << ", ||g0||^2 = " << state.gamma - << ", threshold = " << state.threshold - << std::endl; + << ", threshold = " << state.threshold << std::endl; if ( state.gamma < state.threshold ) { if (parameters.verbosity() != ConjugateGradientParameters::SILENT) From 49a3b2e8a22e9bbe6b848bd0e885647c5d0e110a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:43:41 -0400 Subject: [PATCH 127/204] make PCGSolverParameters a public struct --- gtsam/linear/PCGSolver.cpp | 8 ++------ gtsam/linear/PCGSolver.h | 17 +++-------------- gtsam/linear/linear.i | 20 ++++++++------------ 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index 320c3e535..c0510961f 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -34,17 +34,13 @@ namespace gtsam { void PCGSolverParameters::print(ostream &os) const { Base::print(os); os << "PCGSolverParameters:" << endl; - preconditioner_->print(os); + preconditioner->print(os); } /*****************************************************************************/ PCGSolver::PCGSolver(const PCGSolverParameters &p) { parameters_ = p; - preconditioner_ = createPreconditioner(p.preconditioner()); -} - -void PCGSolverParameters::setPreconditionerParams(const std::shared_ptr preconditioner) { - preconditioner_ = preconditioner; + preconditioner_ = createPreconditioner(p.preconditioner); } void PCGSolverParameters::print(const std::string &s) const { diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h index cb671f288..17cc2d3db 100644 --- a/gtsam/linear/PCGSolver.h +++ b/gtsam/linear/PCGSolver.h @@ -33,30 +33,19 @@ struct PreconditionerParameters; /** * Parameters for Preconditioned Conjugate Gradient solver. 
*/ -struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters { - public: +struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters { typedef ConjugateGradientParameters Base; typedef std::shared_ptr shared_ptr; -protected: - std::shared_ptr preconditioner_; + std::shared_ptr preconditioner; -public: PCGSolverParameters() {} PCGSolverParameters( const std::shared_ptr &preconditioner) - : preconditioner_(preconditioner) {} + : preconditioner(preconditioner) {} void print(std::ostream &os) const override; - - const std::shared_ptr preconditioner() const { - return preconditioner_; - } - - void setPreconditionerParams( - const std::shared_ptr preconditioner); - void print(const std::string &s) const; }; diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index af6c2ee22..eecc0192c 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -710,17 +710,11 @@ virtual class IterativeOptimizationParameters { #include virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters { ConjugateGradientParameters(); - int getMinIterations() const ; - int getMaxIterations() const ; - int getReset() const; - double getEpsilon_rel() const; - double getEpsilon_abs() const; - - void setMinIterations(int value); - void setMaxIterations(int value); - void setReset(int value); - void setEpsilon_rel(double value); - void setEpsilon_abs(double value); + int minIterations; + int maxIterations; + int reset; + double epsilon_rel; + double epsilon_abs; }; #include @@ -739,8 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParamet #include virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters { PCGSolverParameters(); + PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner); void print(string s = ""); - void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner); + + gtsam::PreconditionerParameters* preconditioner; }; #include From 94a95b69cba31dc6d8317cca370da365a44d55aa Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:43:52 -0400 Subject: [PATCH 128/204] update tests --- python/gtsam/tests/test_NonlinearOptimizer.py | 2 +- tests/testIterative.cpp | 12 ++++---- tests/testPreconditioner.cpp | 28 +++++++++---------- timing/timeShonanFactor.cpp | 2 +- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py index 1b4209509..2b276425f 100644 --- a/python/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -69,7 +69,7 @@ class TestScenario(GtsamTestCase): lmParams = LevenbergMarquardtParams.CeresDefaults() lmParams.setLinearSolverType("ITERATIVE") cgParams = PCGSolverParameters() - cgParams.setPreconditionerParams(DummyPreconditionerParameters()) + cgParams.preconditioner = DummyPreconditionerParameters() lmParams.setIterativeParams(cgParams) actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize() self.assertAlmostEqual(0, self.fg.error(actual)) diff --git a/tests/testIterative.cpp b/tests/testIterative.cpp index 6d9918b76..4df598f80 100644 --- a/tests/testIterative.cpp +++ b/tests/testIterative.cpp @@ -95,9 +95,9 @@ TEST( Iterative, conjugateGradientDescent_hard_constraint ) VectorValues zeros = config.zeroVectors(); ConjugateGradientParameters parameters; - parameters.setEpsilon_abs(1e-3); - parameters.setEpsilon_rel(1e-5); - parameters.setMaxIterations(100); + 
parameters.epsilon_abs = 1e-3; + parameters.epsilon_rel = 1e-5; + parameters.maxIterations = 100; VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters); VectorValues expected; @@ -122,9 +122,9 @@ TEST( Iterative, conjugateGradientDescent_soft_constraint ) VectorValues zeros = config.zeroVectors(); ConjugateGradientParameters parameters; - parameters.setEpsilon_abs(1e-3); - parameters.setEpsilon_rel(1e-5); - parameters.setMaxIterations(100); + parameters.epsilon_abs = 1e-3; + parameters.epsilon_rel = 1e-5; + parameters.maxIterations = 100; VectorValues actual = conjugateGradientDescent(*fg, zeros, parameters); VectorValues expected; diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp index 9eff3e7f0..2cea40c3d 100644 --- a/tests/testPreconditioner.cpp +++ b/tests/testPreconditioner.cpp @@ -54,21 +54,21 @@ TEST( PCGsolver, verySimpleLinearSystem) { // Solve the system using Preconditioned Conjugate Gradient solver // Common PCG parameters gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared(); - pcg->setMaxIterations(500); - pcg->setEpsilon_abs(0.0); - pcg->setEpsilon_rel(0.0); + pcg->maxIterations = 500; + pcg->epsilon_abs = 0.0; + pcg->epsilon_rel = 0.0; //pcg->setVerbosity("ERROR"); // With Dummy preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(exactSolution, deltaPCGDummy, 1e-7)); //deltaPCGDummy.print("PCG Dummy"); // With Block-Jacobi preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); // It takes more than 1000 iterations for this test pcg->setMaxIterations(1500); VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG); @@ -107,21 +107,21 @@ TEST(PCGSolver, simpleLinearSystem) { // Solve the system using Preconditioned Conjugate Gradient solver // Common PCG parameters gtsam::PCGSolverParameters::shared_ptr pcg = std::make_shared(); - pcg->setMaxIterations(500); - pcg->setEpsilon_abs(0.0); - pcg->setEpsilon_rel(0.0); + pcg->maxIterations = 500; + pcg->epsilon_abs = 0.0; + pcg->epsilon_rel = 0.0; //pcg->setVerbosity("ERROR"); // With Dummy preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); VectorValues deltaPCGDummy = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(expectedSolution, deltaPCGDummy, 1e-5)); //deltaPCGDummy.print("PCG Dummy"); // With Block-Jacobi preconditioner - pcg->setPreconditionerParams( - std::make_shared()); + pcg->preconditioner = + std::make_shared(); VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(expectedSolution, deltaPCGJacobi, 1e-5)); //deltaPCGJacobi.print("PCG Jacobi"); diff --git a/timing/timeShonanFactor.cpp b/timing/timeShonanFactor.cpp index fc97d01a8..3cdb596cd 100644 --- a/timing/timeShonanFactor.cpp +++ b/timing/timeShonanFactor.cpp @@ -83,7 +83,7 @@ int main(int argc, char* argv[]) { // params.setVerbosityLM("SUMMARY"); // params.linearSolverType = LevenbergMarquardtParams::Iterative; // auto pcg = std::make_shared(); - // pcg->preconditioner_ = + // pcg->preconditioner = // std::make_shared(); // std::make_shared(); // params.iterativeParams = pcg; From d2ca1ef285230a7263e4966a94aedf700bf9e9ac Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:43:56 -0400 Subject: [PATCH 129/204] updat example --- examples/SFMExample_SmartFactorPCG.cpp | 7 +++---- 
1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/SFMExample_SmartFactorPCG.cpp b/examples/SFMExample_SmartFactorPCG.cpp index a913d32ad..b64fcd048 100644 --- a/examples/SFMExample_SmartFactorPCG.cpp +++ b/examples/SFMExample_SmartFactorPCG.cpp @@ -94,11 +94,10 @@ int main(int argc, char* argv[]) { parameters.maxIterations = 500; PCGSolverParameters::shared_ptr pcg = std::make_shared(); - pcg->preconditioner_ = - std::make_shared(); + pcg->preconditioner = std::make_shared(); // Following is crucial: - pcg->setEpsilon_abs(1e-10); - pcg->setEpsilon_rel(1e-10); + pcg->epsilon_abs = 1e-10; + pcg->epsilon_rel = 1e-10; parameters.iterativeParams = pcg; LevenbergMarquardtOptimizer optimizer(graph, initialEstimate, parameters); From 5c0171b69cfb9d3884328d1d6f17368d6c2acc8c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:56:43 -0400 Subject: [PATCH 130/204] re-add typedefs --- .../NonlinearConjugateGradientOptimizer.cpp | 18 +++++++++++------- .../NonlinearConjugateGradientOptimizer.h | 14 ++++++++------ 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 403c72908..211acc78d 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -49,19 +49,23 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( params_(params) {} double NonlinearConjugateGradientOptimizer::System::error( - const Values& state) const { + const State& state) const { return graph_.error(state); } -VectorValues NonlinearConjugateGradientOptimizer::System::gradient( - const Values& state) const { +NonlinearConjugateGradientOptimizer::System::Gradient +NonlinearConjugateGradientOptimizer::System::gradient( + const State& state) const { return gradientInPlace(graph_, state); } -Values NonlinearConjugateGradientOptimizer::System::advance( - const Values& current, const double alpha, - const VectorValues& gradient) const { - return current.retract(alpha * gradient); +NonlinearConjugateGradientOptimizer::System::State +NonlinearConjugateGradientOptimizer::System::advance(const State& current, + const double alpha, + const Gradient& g) const { + Gradient step = g; + step *= alpha; + return current.retract(step); } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 26f596b06..f662403dc 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -29,6 +29,8 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz /* a class for the nonlinearConjugateGradient template */ class System { public: + typedef Values State; + typedef VectorValues Gradient; typedef NonlinearOptimizerParams Parameters; protected: @@ -38,10 +40,10 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz System(const NonlinearFactorGraph &graph) : graph_(graph) { } - double error(const Values &state) const; - VectorValues gradient(const Values &state) const; - Values advance(const Values ¤t, const double alpha, - const VectorValues &g) const; + double error(const State &state) const; + Gradient gradient(const State &state) const; + State advance(const State ¤t, const double alpha, + const Gradient &g) const; }; public: 
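The System class above, with its re-added State and Gradient typedefs, is the model for the S template parameter consumed by lineSearch and nonlinearConjugateGradient below: any type providing error(state), gradient(state), and advance(state, alpha, direction) can drive the solver. As a minimal self-contained sketch of that contract, here is a hypothetical 1-D quadratic standing in for Values/VectorValues. It is illustrative only and not part of the patches; the real template additionally requires norm() and dot() on the gradient type, which plain double does not provide.

#include <iostream>

// Hypothetical toy "system" illustrating the error/gradient/advance contract.
// QuadraticSystem is not part of GTSAM; it minimizes error(x) = (x - 3)^2.
struct QuadraticSystem {
  typedef double State;     // plays the role of Values
  typedef double Gradient;  // plays the role of VectorValues
  double error(const State& x) const { return (x - 3.0) * (x - 3.0); }
  Gradient gradient(const State& x) const { return 2.0 * (x - 3.0); }
  State advance(const State& x, double alpha, const Gradient& g) const {
    return x + alpha * g;  // mirrors Values::retract(alpha * g)
  }
};

int main() {
  QuadraticSystem system;
  double x = 0.0;
  // A fixed negative step stands in for the golden-section line search.
  for (int i = 0; i < 100; ++i) x = system.advance(x, -0.1, system.gradient(x));
  std::cout << "x = " << x << ", error = " << system.error(x) << "\n";
  return 0;
}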
@@ -162,8 +164,8 @@ std::tuple nonlinearConjugateGradient(const S &system, } V currentValues = initial; - VectorValues currentGradient = system.gradient(currentValues), prevGradient, - direction = currentGradient; + typename S::Gradient currentGradient = system.gradient(currentValues), + prevGradient, direction = currentGradient; /* do one step of gradient descent */ V prevValues = currentValues; From 9931e29108d6b50d2382696c55b85216759f8c3c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Tue, 15 Oct 2024 23:56:52 -0400 Subject: [PATCH 131/204] fix tests --- tests/testPreconditioner.cpp | 2 +- tests/testSubgraphSolver.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testPreconditioner.cpp b/tests/testPreconditioner.cpp index 2cea40c3d..967584b7d 100644 --- a/tests/testPreconditioner.cpp +++ b/tests/testPreconditioner.cpp @@ -70,7 +70,7 @@ TEST( PCGsolver, verySimpleLinearSystem) { pcg->preconditioner = std::make_shared(); // It takes more than 1000 iterations for this test - pcg->setMaxIterations(1500); + pcg->maxIterations = 1500; VectorValues deltaPCGJacobi = PCGSolver(*pcg).optimize(simpleGFG); EXPECT(assert_equal(exactSolution, deltaPCGJacobi, 1e-5)); diff --git a/tests/testSubgraphSolver.cpp b/tests/testSubgraphSolver.cpp index 026f3c919..e31ce23b5 100644 --- a/tests/testSubgraphSolver.cpp +++ b/tests/testSubgraphSolver.cpp @@ -48,7 +48,7 @@ static double error(const GaussianFactorGraph& fg, const VectorValues& x) { TEST( SubgraphSolver, Parameters ) { LONGS_EQUAL(SubgraphSolverParameters::SILENT, kParameters.verbosity()); - LONGS_EQUAL(500, kParameters.maxIterations()); + LONGS_EQUAL(500, kParameters.maxIterations); } /* ************************************************************************* */ From 38b5dd5eb04aee5a4a90108b1a482b24ae073754 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 09:17:22 -0400 Subject: [PATCH 132/204] update deprecated methods --- gtsam/linear/ConjugateGradientSolver.h | 36 +++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h index ab804e3f4..9cdb41382 100644 --- a/gtsam/linear/ConjugateGradientSolver.h +++ b/gtsam/linear/ConjugateGradientSolver.h @@ -71,26 +71,26 @@ struct GTSAM_EXPORT ConjugateGradientParameters #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43 /* general interface */ - inline size_t minIterations() const { return minIterations_; } - inline size_t maxIterations() const { return maxIterations_; } - inline size_t reset() const { return reset_; } - inline double epsilon() const { return epsilon_rel_; } - inline double epsilon_rel() const { return epsilon_rel_; } - inline double epsilon_abs() const { return epsilon_abs_; } + inline size_t minIterations() const { return minIterations; } + inline size_t maxIterations() const { return maxIterations; } + inline size_t reset() const { return reset; } + inline double epsilon() const { return epsilon_rel; } + inline double epsilon_rel() const { return epsilon_rel; } + inline double epsilon_abs() const { return epsilon_abs; } - inline size_t getMinIterations() const { return minIterations_; } - inline size_t getMaxIterations() const { return maxIterations_; } - inline size_t getReset() const { return reset_; } - inline double getEpsilon() const { return epsilon_rel_; } - inline double getEpsilon_rel() const { return epsilon_rel_; } - inline double getEpsilon_abs() const { return epsilon_abs_; } + inline size_t getMinIterations() const { return 
minIterations; } + inline size_t getMaxIterations() const { return maxIterations; } + inline size_t getReset() const { return reset; } + inline double getEpsilon() const { return epsilon_rel; } + inline double getEpsilon_rel() const { return epsilon_rel; } + inline double getEpsilon_abs() const { return epsilon_abs; } - inline void setMinIterations(size_t value) { minIterations_ = value; } - inline void setMaxIterations(size_t value) { maxIterations_ = value; } - inline void setReset(size_t value) { reset_ = value; } - inline void setEpsilon(double value) { epsilon_rel_ = value; } - inline void setEpsilon_rel(double value) { epsilon_rel_ = value; } - inline void setEpsilon_abs(double value) { epsilon_abs_ = value; } + inline void setMinIterations(size_t value) { minIterations = value; } + inline void setMaxIterations(size_t value) { maxIterations = value; } + inline void setReset(size_t value) { reset = value; } + inline void setEpsilon(double value) { epsilon_rel = value; } + inline void setEpsilon_rel(double value) { epsilon_rel = value; } + inline void setEpsilon_abs(double value) { epsilon_abs = value; } #endif From 7dd6d977fe5695aeb3e0dc7aa72b31b12013ba78 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 09:44:47 -0400 Subject: [PATCH 133/204] undo template removal --- .../NonlinearConjugateGradientOptimizer.cpp | 139 +-------------- .../NonlinearConjugateGradientOptimizer.h | 162 +++++++++++++++++- 2 files changed, 163 insertions(+), 138 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 9fce1776b..dc44deeb7 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -48,24 +48,25 @@ NonlinearConjugateGradientOptimizer::NonlinearConjugateGradientOptimizer( new State(initialValues, graph.error(initialValues)))), params_(params) {} -double NonlinearConjugateGradientOptimizer::error(const Values& state) const { +double NonlinearConjugateGradientOptimizer::System::error( + const Values& state) const { return graph_.error(state); } -VectorValues NonlinearConjugateGradientOptimizer::gradient( +VectorValues NonlinearConjugateGradientOptimizer::System::gradient( const Values& state) const { return gradientInPlace(graph_, state); } -Values NonlinearConjugateGradientOptimizer::advance( +Values NonlinearConjugateGradientOptimizer::System::advance( const Values& current, const double alpha, const VectorValues& gradient) const { return current.retract(alpha * gradient); } GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { - const auto [newValues, dummy] = nonlinearConjugateGradient( - state_->values, params_, true /* single iteration */); + const auto [newValues, dummy] = nonlinearConjugateGradient( + System(graph_), state_->values, params_, true /* single iteration */); state_.reset( new State(newValues, graph_.error(newValues), state_->iterations + 1)); @@ -75,136 +76,12 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { const Values& NonlinearConjugateGradientOptimizer::optimize() { // Optimize until convergence + System system(graph_); const auto [newValues, iterations] = - nonlinearConjugateGradient(state_->values, params_, false); + nonlinearConjugateGradient(system, state_->values, params_, false); state_.reset( new State(std::move(newValues), graph_.error(newValues), iterations)); return state_->values; } -double 
NonlinearConjugateGradientOptimizer::lineSearch( - const Values& currentValues, const VectorValues& gradient) const { - /* normalize it such that it becomes a unit vector */ - const double g = gradient.norm(); - - // perform the golden section search algorithm to decide the the optimal - // step size detail refer to - // http://en.wikipedia.org/wiki/Golden_section_search - const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, - tau = 1e-5; - double minStep = -1.0 / g, maxStep = 0, - newStep = minStep + (maxStep - minStep) / (phi + 1.0); - - Values newValues = advance(currentValues, newStep, gradient); - double newError = error(newValues); - - while (true) { - const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; - const double testStep = flag ? newStep + resphi * (maxStep - newStep) - : newStep - resphi * (newStep - minStep); - - if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) { - return 0.5 * (minStep + maxStep); - } - - const Values testValues = advance(currentValues, testStep, gradient); - const double testError = error(testValues); - - // update the working range - if (testError >= newError) { - if (flag) - maxStep = testStep; - else - minStep = testStep; - } else { - if (flag) { - minStep = newStep; - newStep = testStep; - newError = testError; - } else { - maxStep = newStep; - newStep = testStep; - newError = testError; - } - } - } - return 0.0; -} - -std::tuple -NonlinearConjugateGradientOptimizer::nonlinearConjugateGradient( - const Values& initial, const NonlinearOptimizerParams& params, - const bool singleIteration, const bool gradientDescent) const { - size_t iteration = 0; - - // check if we're already close enough - double currentError = error(initial); - if (currentError <= params.errorTol) { - if (params.verbosity >= NonlinearOptimizerParams::ERROR) { - std::cout << "Exiting, as error = " << currentError << " < " - << params.errorTol << std::endl; - } - return {initial, iteration}; - } - - Values currentValues = initial; - VectorValues currentGradient = gradient(currentValues), prevGradient, - direction = currentGradient; - - /* do one step of gradient descent */ - Values prevValues = currentValues; - double prevError = currentError; - double alpha = lineSearch(currentValues, direction); - currentValues = advance(prevValues, alpha, direction); - currentError = error(currentValues); - - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "Initial error: " << currentError << std::endl; - - // Iterative loop - do { - if (gradientDescent == true) { - direction = gradient(currentValues); - } else { - prevGradient = currentGradient; - currentGradient = gradient(currentValues); - // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 - const double beta = - std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / - prevGradient.dot(prevGradient)); - direction = currentGradient + (beta * direction); - } - - alpha = lineSearch(currentValues, direction); - - prevValues = currentValues; - prevError = currentError; - - currentValues = advance(prevValues, alpha, direction); - currentError = error(currentValues); - - // User hook: - if (params.iterationHook) - params.iterationHook(iteration, prevError, currentError); - - // Maybe show output - if (params.verbosity >= NonlinearOptimizerParams::ERROR) - std::cout << "iteration: " << iteration - << ", currentError: " << currentError << std::endl; - } while (++iteration < params.maxIterations && !singleIteration && - 
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, - params.errorTol, prevError, currentError, - params.verbosity)); - - // Printing if verbose - if (params.verbosity >= NonlinearOptimizerParams::ERROR && - iteration >= params.maxIterations) - std::cout << "nonlinearConjugateGradient: Terminating because reached " - "maximum iterations" - << std::endl; - - return {currentValues, iteration}; -} - } /* namespace gtsam */
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index cdc0634d6..2c4628c29 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -26,6 +26,22 @@ namespace gtsam { /** An implementation of the nonlinear CG method using the template below */ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimiz + /* a class for the nonlinearConjugateGradient template */ + class System { + public: + typedef NonlinearOptimizerParams Parameters; + + protected: + const NonlinearFactorGraph &graph_; + + public: + System(const NonlinearFactorGraph &graph) : graph_(graph) {} + double error(const Values &state) const; + VectorValues gradient(const Values &state) const; + Values advance(const Values &current, const double alpha, + const VectorValues &g) const; + }; + public: typedef NonlinearOptimizer Base; typedef NonlinearOptimizerParams Parameters;
@@ -45,13 +61,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer typedef NonlinearOptimizerParams Parameters; typedef std::shared_ptr shared_ptr; - /// Destructor ~NonlinearConjugateGradientOptimizer() override {} - double error(const Values &state) const; - - VectorValues gradient(const Values &state) const; - - Values advance(const Values &current, const double alpha, - const VectorValues &g) const; - /** * Perform a single iteration, returning GaussianFactorGraph corresponding to * the linearized factor graph.
@@ -81,4 +90,143 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer const bool singleIteration, const bool gradientDescent = false) const; }; +/** Implement the golden-section line search algorithm */ +template +double lineSearch(const S &system, const V currentValues, const W &gradient) { + /* normalize it such that it becomes a unit vector */ + const double g = gradient.norm(); + + // perform the golden-section search algorithm to decide the optimal step + // size; for details refer to http://en.wikipedia.org/wiki/Golden_section_search + const double phi = 0.5 * (1.0 + std::sqrt(5.0)), resphi = 2.0 - phi, + tau = 1e-5; + double minStep = -1.0 / g, maxStep = 0, + newStep = minStep + (maxStep - minStep) / (phi + 1.0); + + V newValues = system.advance(currentValues, newStep, gradient); + double newError = system.error(newValues); + + while (true) { + const bool flag = (maxStep - newStep > newStep - minStep) ? true : false; + const double testStep = flag ?
newStep + resphi * (maxStep - newStep) + : newStep - resphi * (newStep - minStep); + + if ((maxStep - minStep) < tau * (std::abs(testStep) + std::abs(newStep))) { + return 0.5 * (minStep + maxStep); + } + + const V testValues = system.advance(currentValues, testStep, gradient); + const double testError = system.error(testValues); + + // update the working range + if (testError >= newError) { + if (flag) + maxStep = testStep; + else + minStep = testStep; + } else { + if (flag) { + minStep = newStep; + newStep = testStep; + newError = testError; + } else { + maxStep = newStep; + newStep = testStep; + newError = testError; + } + } + } + return 0.0; +} + +/** + * Implement the nonlinear conjugate gradient method using the Polak-Ribiere + * formula suggested in + * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. + * + * The S (system) class requires three member functions: error(state), + * gradient(state) and advance(state, step-size, direction). The V class denotes + * the state or the solution. + * + * The last parameter is a switch between gradient-descent and conjugate + * gradient + */ +template +std::tuple nonlinearConjugateGradient( + const S &system, const V &initial, const NonlinearOptimizerParams ¶ms, + const bool singleIteration, const bool gradientDescent = false) { + // GTSAM_CONCEPT_MANIFOLD_TYPE(V) + + size_t iteration = 0; + + // check if we're already close enough + double currentError = system.error(initial); + if (currentError <= params.errorTol) { + if (params.verbosity >= NonlinearOptimizerParams::ERROR) { + std::cout << "Exiting, as error = " << currentError << " < " + << params.errorTol << std::endl; + } + return {initial, iteration}; + } + + V currentValues = initial; + VectorValues currentGradient = system.gradient(currentValues), prevGradient, + direction = currentGradient; + + /* do one step of gradient descent */ + V prevValues = currentValues; + double prevError = currentError; + double alpha = lineSearch(system, currentValues, direction); + currentValues = system.advance(prevValues, alpha, direction); + currentError = system.error(currentValues); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "Initial error: " << currentError << std::endl; + + // Iterative loop + do { + if (gradientDescent == true) { + direction = system.gradient(currentValues); + } else { + prevGradient = currentGradient; + currentGradient = system.gradient(currentValues); + // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 + const double beta = + std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / + prevGradient.dot(prevGradient)); + direction = currentGradient + (beta * direction); + } + + alpha = lineSearch(system, currentValues, direction); + + prevValues = currentValues; + prevError = currentError; + + currentValues = system.advance(prevValues, alpha, direction); + currentError = system.error(currentValues); + + // User hook: + if (params.iterationHook) + params.iterationHook(iteration, prevError, currentError); + + // Maybe show output + if (params.verbosity >= NonlinearOptimizerParams::ERROR) + std::cout << "iteration: " << iteration + << ", currentError: " << currentError << std::endl; + } while (++iteration < params.maxIterations && !singleIteration && + !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, + params.errorTol, prevError, currentError, + params.verbosity)); + + // Printing if verbose + if (params.verbosity >= NonlinearOptimizerParams::ERROR && + iteration >= 
params.maxIterations) + std::cout << "nonlinearConjugateGradient: Terminating because reached " + "maximum iterations" + << std::endl; + + return {currentValues, iteration}; +} + } // namespace gtsam From f5f7590e78e13d218759b21a64459972d0c7c728 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 09:47:23 -0400 Subject: [PATCH 134/204] kill methods with same name in favor of using variables directly --- gtsam/linear/ConjugateGradientSolver.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h index 9cdb41382..46d49ca4f 100644 --- a/gtsam/linear/ConjugateGradientSolver.h +++ b/gtsam/linear/ConjugateGradientSolver.h @@ -70,14 +70,6 @@ struct GTSAM_EXPORT ConjugateGradientParameters blas_kernel(GTSAM) {} #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V43 - /* general interface */ - inline size_t minIterations() const { return minIterations; } - inline size_t maxIterations() const { return maxIterations; } - inline size_t reset() const { return reset; } - inline double epsilon() const { return epsilon_rel; } - inline double epsilon_rel() const { return epsilon_rel; } - inline double epsilon_abs() const { return epsilon_abs; } - inline size_t getMinIterations() const { return minIterations; } inline size_t getMaxIterations() const { return maxIterations; } inline size_t getReset() const { return reset; } From d883e16cd1c6fe770d81ee188349a946c0254bc3 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 10:20:45 -0400 Subject: [PATCH 135/204] remove extra methods --- .../NonlinearConjugateGradientOptimizer.h | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index 1858fbc2e..369291fae 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -74,22 +74,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer * variable assignments. */ const Values &optimize() override; - - /** Implement the golden-section line search algorithm */ - double lineSearch(const Values ¤tValues, - const VectorValues &gradient) const; - - /** - * Implement the nonlinear conjugate gradient method using the Polak-Ribiere - * formula suggested in - * http://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method. 
- * - * The last parameter is a switch between gradient-descent and conjugate - * gradient - */ - std::tuple nonlinearConjugateGradient( - const Values &initial, const NonlinearOptimizerParams ¶ms, - const bool singleIteration, const bool gradientDescent = false) const; }; /** Implement the golden-section line search algorithm */ From 2d3a296d068159a8968a096364faf40bc100512c Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 11:09:52 -0400 Subject: [PATCH 136/204] nonlinearConjugateGradient accepts direction method --- .../NonlinearConjugateGradientOptimizer.cpp | 35 ++++++------ .../NonlinearConjugateGradientOptimizer.h | 57 +++++++++++++++---- 2 files changed, 63 insertions(+), 29 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index fd0e60bc8..438eac7a8 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -28,18 +28,18 @@ namespace gtsam { typedef internal::NonlinearOptimizerState State; -/// Fletcher-Reeves formula for computing β, the direction of steepest descent. -static double FletcherReeves(const VectorValues& currentGradient, - const VectorValues& prevGradient) { +/* ************************************************************************* */ +double FletcherReeves(const VectorValues& currentGradient, + const VectorValues& prevGradient) { // Fletcher-Reeves: beta = g_n'*g_n/g_n-1'*g_n-1 const double beta = std::max(0.0, currentGradient.dot(currentGradient) / prevGradient.dot(prevGradient)); return beta; } -/// Polak-Ribiere formula for computing β, the direction of steepest descent. -static double PolakRibiere(const VectorValues& currentGradient, - const VectorValues& prevGradient) { +/* ************************************************************************* */ +double PolakRibiere(const VectorValues& currentGradient, + const VectorValues& prevGradient) { // Polak-Ribiere: beta = g_n'*(g_n-g_n-1)/g_n-1'*g_n-1 const double beta = std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / @@ -47,20 +47,20 @@ static double PolakRibiere(const VectorValues& currentGradient, return beta; } -/// The Hestenes-Stiefel formula for computing β, the direction of steepest descent. -static double HestenesStiefel(const VectorValues& currentGradient, - const VectorValues& prevGradient, - const VectorValues& direction) { +/* ************************************************************************* */ +double HestenesStiefel(const VectorValues& currentGradient, + const VectorValues& prevGradient, + const VectorValues& direction) { // Hestenes-Stiefel: beta = g_n'*(g_n-g_n-1)/(-s_n-1')*(g_n-g_n-1) VectorValues d = currentGradient - prevGradient; const double beta = std::max(0.0, currentGradient.dot(d) / -direction.dot(d)); return beta; } -/// The Dai-Yuan formula for computing β, the direction of steepest descent. 
-static double DaiYuan(const VectorValues& currentGradient, - const VectorValues& prevGradient, - const VectorValues& direction) { +/* ************************************************************************* */ +double DaiYuan(const VectorValues& currentGradient, + const VectorValues& prevGradient, + const VectorValues& direction) { // Dai-Yuan: beta = g_n'*g_n/(-s_n-1')*(g_n-g_n-1) const double beta = std::max(0.0, currentGradient.dot(currentGradient) / -direction.dot(currentGradient - prevGradient)); return beta; }
@@ -110,7 +110,8 @@ NonlinearConjugateGradientOptimizer::System::advance(const State& current, GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { const auto [newValues, dummy] = nonlinearConjugateGradient( - System(graph_), state_->values, params_, true /* single iteration */); + System(graph_), state_->values, params_, true /* single iteration */, + directionMethod_); state_.reset( new State(newValues, graph_.error(newValues), state_->iterations + 1));
@@ -121,8 +122,8 @@ GaussianFactorGraph::shared_ptr NonlinearConjugateGradientOptimizer::iterate() { const Values& NonlinearConjugateGradientOptimizer::optimize() { // Optimize until convergence System system(graph_); - const auto [newValues, iterations] = - nonlinearConjugateGradient(system, state_->values, params_, false); + const auto [newValues, iterations] = nonlinearConjugateGradient( + system, state_->values, params_, false, directionMethod_); state_.reset( new State(std::move(newValues), graph_.error(newValues), iterations)); return state_->values;
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 78f11a600..09b932d42 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -23,6 +23,31 @@ namespace gtsam { +/// Fletcher-Reeves formula for computing β, the weight on the previous search direction. +double FletcherReeves(const VectorValues &currentGradient, + const VectorValues &prevGradient); + +/// Polak-Ribiere formula for computing β, the weight on the previous search direction. +double PolakRibiere(const VectorValues &currentGradient, + const VectorValues &prevGradient); + +/// The Hestenes-Stiefel formula for computing β, +/// the weight on the previous search direction. +double HestenesStiefel(const VectorValues &currentGradient, + const VectorValues &prevGradient, + const VectorValues &direction); + +/// The Dai-Yuan formula for computing β, the weight on the previous search direction.
+double DaiYuan(const VectorValues ¤tGradient, + const VectorValues &prevGradient, const VectorValues &direction); + +enum class DirectionMethod { + FletcherReeves, + PolakRibiere, + HestenesStiefel, + DaiYuan +}; + /** An implementation of the nonlinear CG method using the template below */ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer : public NonlinearOptimizer { @@ -49,13 +74,6 @@ class GTSAM_EXPORT NonlinearConjugateGradientOptimizer typedef NonlinearOptimizerParams Parameters; typedef std::shared_ptr shared_ptr; - enum class DirectionMethod { - FletcherReeves, - PolakRibiere, - HestenesStiefel, - DaiYuan - }; - protected: Parameters params_; DirectionMethod directionMethod_ = DirectionMethod::PolakRibiere; @@ -149,7 +167,9 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) { template std::tuple nonlinearConjugateGradient( const S &system, const V &initial, const NonlinearOptimizerParams ¶ms, - const bool singleIteration, const bool gradientDescent = false) { + const bool singleIteration, + const DirectionMethod &directionMethod = DirectionMethod::PolakRibiere, + const bool gradientDescent = false) { // GTSAM_CONCEPT_MANIFOLD_TYPE(V) size_t iteration = 0; @@ -186,10 +206,23 @@ std::tuple nonlinearConjugateGradient( } else { prevGradient = currentGradient; currentGradient = system.gradient(currentValues); - // Polak-Ribiere: beta = g'*(g_n-g_n-1)/g_n-1'*g_n-1 - const double beta = - std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / - prevGradient.dot(prevGradient)); + + double beta; + switch (directionMethod) { + case DirectionMethod::FletcherReeves: + beta = FletcherReeves(currentGradient, prevGradient); + break; + case DirectionMethod::PolakRibiere: + beta = PolakRibiere(currentGradient, prevGradient); + break; + case DirectionMethod::HestenesStiefel: + beta = HestenesStiefel(currentGradient, prevGradient, direction); + break; + case DirectionMethod::DaiYuan: + beta = DaiYuan(currentGradient, prevGradient, direction); + break; + } + direction = currentGradient + (beta * direction); } From 872eae951073867543a7db828e17b8f61323a29e Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 18:19:33 -0400 Subject: [PATCH 137/204] Rosenbrock function --- ...estNonlinearConjugateGradientOptimizer.cpp | 149 ++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 36673c7a0..2cd705e88 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -8,9 +8,12 @@ #include #include +#include #include #include +#include #include +#include #include using namespace std; @@ -79,6 +82,152 @@ TEST(NonlinearConjugateGradientOptimizer, Optimize) { EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4); } +namespace rosenbrock { + +/// Alias for the first term in the Rosenbrock function +// using Rosenbrock1Factor = PriorFactor; + +using symbol_shorthand::X; +using symbol_shorthand::Y; + +class Rosenbrock1Factor : public NoiseModelFactorN { + private: + typedef Rosenbrock1Factor This; + typedef NoiseModelFactorN Base; + + double a_; + + public: + /** Constructor: key is x */ + Rosenbrock1Factor(Key key, double a, const SharedNoiseModel& model = nullptr) + : Base(model, key), a_(a) {} + + /// evaluate error + Vector evaluateError(const double& x, OptionalMatrixType H) const override { + double sqrt_2 = 
1.4142135623730951; + if (H) (*H) = sqrt_2 * Vector::Ones(1).transpose(); + return sqrt_2 * Vector1(x - a_); + } +}; + +/** + * @brief Factor for the second term of the Rosenbrock function. + * f2 = (x*x - y) + * + * We use the noise model to premultiply with `b`. + */ +class Rosenbrock2Factor : public NoiseModelFactorN { + private: + typedef Rosenbrock2Factor This; + typedef NoiseModelFactorN Base; + + public: + /** Constructor: key1 is x, key2 is y */ + Rosenbrock2Factor(Key key1, Key key2, const SharedNoiseModel& model = nullptr) + : Base(model, key1, key2) {} + + /// evaluate error + Vector evaluateError(const double& p1, const double& p2, + OptionalMatrixType H1, + OptionalMatrixType H2) const override { + double sqrt_2 = 1.4142135623730951; + if (H1) (*H1) = sqrt_2 * p1 * Vector::Ones(1).transpose(); + if (H2) (*H2) = -sqrt_2 * Vector::Ones(1).transpose(); + return sqrt_2 * Vector1((p1 * p1) - p2); + } +}; + +/** + * @brief Get a nonlinear factor graph representing + * the Rosenbrock Banana function. + * + * More details: https://en.wikipedia.org/wiki/Rosenbrock_function + * + * @param a + * @param b + * @return NonlinearFactorGraph + */ +static NonlinearFactorGraph GetRosenbrockGraph(double a = 1.0, + double b = 100.0) { + NonlinearFactorGraph graph; + graph.emplace_shared(X(0), a, noiseModel::Unit::Create(1)); + graph.emplace_shared( + X(0), Y(0), noiseModel::Isotropic::Precision(1, b)); + + return graph; +} + +/// Compute the Rosenbrock function error given the nonlinear factor graph +/// and input values. +double f(const NonlinearFactorGraph& graph, double x, double y) { + Values initial; + initial.insert(X(0), x); + initial.insert(Y(0), y); + + return graph.error(initial); +} + +/// True Rosenbrock Banana function. +double rosenbrock_func(double x, double y, double a = 1.0, double b = 100.0) { + double m = (a - x) * (a - x); + double n = b * (y - x * x) * (y - x * x); + return m + n; +} +} // namespace rosenbrock + +/* ************************************************************************* */ +// Test whether the 2 factors are properly implemented. +TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { + using namespace rosenbrock; + double a = 1.0, b = 100.0; + Rosenbrock1Factor f1(X(0), a, noiseModel::Unit::Create(1)); + Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Sigma(1, b)); + Values values; + values.insert(X(0), 0.0); + values.insert(Y(0), 0.0); + EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); + EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); + + std::mt19937 rng(42); + std::uniform_real_distribution dist(0.0, 100.0); + for (size_t i = 0; i < 50; ++i) { + double x = dist(rng); + double y = dist(rng); + + auto graph = GetRosenbrockGraph(a, b); + EXPECT_DOUBLES_EQUAL(rosenbrock_func(x, y, a, b), f(graph, x, y), 1e-5); + } +} + +/* ************************************************************************* */ +// Optimize the Rosenbrock function to verify optimizer works +TEST(NonlinearConjugateGradientOptimizer, Optimization) { + using namespace rosenbrock; + + double a = 1; + auto graph = GetRosenbrockGraph(a); + + // Assert that error at global minimum is 0. 
+ double error = f(graph, a, a * a); + EXPECT_DOUBLES_EQUAL(0.0, error, 1e-12); + + NonlinearOptimizerParams param; + param.maxIterations = 16; + // param.verbosity = NonlinearOptimizerParams::LINEAR; + param.verbosity = NonlinearOptimizerParams::SILENT; + + Values initialEstimate; + initialEstimate.insert(X(0), 0.0); + initialEstimate.insert(Y(0), 0.0); + + // std::cout << f(graph, 11.0, 90.0) << std::endl; + + NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param); + Values result = optimizer.optimize(); + result.print(); + cout << "cg final error = " << graph.error(result) << endl; +} + /* ************************************************************************* */ int main() { TestResult tr; From 591e1ee4f8aa7f3a3738c737d11fe72cf7ca26bb Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 16 Oct 2024 21:31:52 -0400 Subject: [PATCH 138/204] lots of small improvements --- ...estNonlinearConjugateGradientOptimizer.cpp | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 2cd705e88..75aff560f 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -105,14 +105,15 @@ class Rosenbrock1Factor : public NoiseModelFactorN { /// evaluate error Vector evaluateError(const double& x, OptionalMatrixType H) const override { double sqrt_2 = 1.4142135623730951; - if (H) (*H) = sqrt_2 * Vector::Ones(1).transpose(); - return sqrt_2 * Vector1(x - a_); + double d = a_ - x; + if (H) (*H) = -2 * d * Vector::Ones(1).transpose(); + return Vector1(sqrt_2 * d); } }; /** * @brief Factor for the second term of the Rosenbrock function. - * f2 = (x*x - y) + * f2 = (y - x*x) * * We use the noise model to premultiply with `b`. 
*/ @@ -121,19 +122,22 @@ class Rosenbrock2Factor : public NoiseModelFactorN { typedef Rosenbrock2Factor This; typedef NoiseModelFactorN Base; + double b_; + public: /** Constructor: key1 is x, key2 is y */ - Rosenbrock2Factor(Key key1, Key key2, const SharedNoiseModel& model = nullptr) - : Base(model, key1, key2) {} + Rosenbrock2Factor(Key key1, Key key2, double b, + const SharedNoiseModel& model = nullptr) + : Base(model, key1, key2), b_(b) {} /// evaluate error - Vector evaluateError(const double& p1, const double& p2, - OptionalMatrixType H1, + Vector evaluateError(const double& x, const double& y, OptionalMatrixType H1, OptionalMatrixType H2) const override { double sqrt_2 = 1.4142135623730951; - if (H1) (*H1) = sqrt_2 * p1 * Vector::Ones(1).transpose(); - if (H2) (*H2) = -sqrt_2 * Vector::Ones(1).transpose(); - return sqrt_2 * Vector1((p1 * p1) - p2); + double x2 = x * x, d = y - x2; + if (H1) (*H1) = -2 * b_ * d * 2 * x * Vector::Ones(1).transpose(); + if (H2) (*H2) = 2 * b_ * d * Vector::Ones(1).transpose(); + return Vector1(sqrt_2 * d); } }; @@ -152,7 +156,7 @@ static NonlinearFactorGraph GetRosenbrockGraph(double a = 1.0, NonlinearFactorGraph graph; graph.emplace_shared(X(0), a, noiseModel::Unit::Create(1)); graph.emplace_shared( - X(0), Y(0), noiseModel::Isotropic::Precision(1, b)); + X(0), Y(0), b, noiseModel::Isotropic::Precision(1, b)); return graph; } @@ -181,12 +185,12 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { using namespace rosenbrock; double a = 1.0, b = 100.0; Rosenbrock1Factor f1(X(0), a, noiseModel::Unit::Create(1)); - Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Sigma(1, b)); + Rosenbrock2Factor f2(X(0), Y(0), b, noiseModel::Isotropic::Sigma(1, b)); Values values; values.insert(X(0), 0.0); values.insert(Y(0), 0.0); - EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); - EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); + // EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); + // EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); std::mt19937 rng(42); std::uniform_real_distribution dist(0.0, 100.0); @@ -213,13 +217,18 @@ TEST(NonlinearConjugateGradientOptimizer, Optimization) { NonlinearOptimizerParams param; param.maxIterations = 16; - // param.verbosity = NonlinearOptimizerParams::LINEAR; - param.verbosity = NonlinearOptimizerParams::SILENT; + param.verbosity = NonlinearOptimizerParams::LINEAR; + // param.verbosity = NonlinearOptimizerParams::SILENT; Values initialEstimate; initialEstimate.insert(X(0), 0.0); initialEstimate.insert(Y(0), 0.0); + // std::cout << f(graph, 0, 0) << std::endl; + GaussianFactorGraph::shared_ptr linear = graph.linearize(initialEstimate); + linear->print(); + linear->gradientAtZero().print("Gradient: "); + // std::cout << f(graph, 11.0, 90.0) << std::endl; NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param); From 83e3fdaa2488fd0e8a8281c09abf7654a79dccb8 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 10:22:49 -0400 Subject: [PATCH 139/204] Rosenbrock function implemented as factor graph --- ...estNonlinearConjugateGradientOptimizer.cpp | 50 +++++++++---------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 75aff560f..1e16c6509 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -90,6 +90,8 @@ 
namespace rosenbrock { using symbol_shorthand::X; using symbol_shorthand::Y; +constexpr double sqrt_2 = 1.4142135623730951; + class Rosenbrock1Factor : public NoiseModelFactorN { private: typedef Rosenbrock1Factor This; @@ -104,9 +106,9 @@ class Rosenbrock1Factor : public NoiseModelFactorN { /// evaluate error Vector evaluateError(const double& x, OptionalMatrixType H) const override { - double sqrt_2 = 1.4142135623730951; - double d = a_ - x; - if (H) (*H) = -2 * d * Vector::Ones(1).transpose(); + double d = x - a_; + // Because linearized gradient is -A'b, it will multiply by d + if (H) (*H) = Vector1(2 / sqrt_2).transpose(); return Vector1(sqrt_2 * d); } }; @@ -122,21 +124,18 @@ class Rosenbrock2Factor : public NoiseModelFactorN { typedef Rosenbrock2Factor This; typedef NoiseModelFactorN Base; - double b_; - public: /** Constructor: key1 is x, key2 is y */ - Rosenbrock2Factor(Key key1, Key key2, double b, - const SharedNoiseModel& model = nullptr) - : Base(model, key1, key2), b_(b) {} + Rosenbrock2Factor(Key key1, Key key2, const SharedNoiseModel& model = nullptr) + : Base(model, key1, key2) {} /// evaluate error Vector evaluateError(const double& x, const double& y, OptionalMatrixType H1, OptionalMatrixType H2) const override { - double sqrt_2 = 1.4142135623730951; - double x2 = x * x, d = y - x2; - if (H1) (*H1) = -2 * b_ * d * 2 * x * Vector::Ones(1).transpose(); - if (H2) (*H2) = 2 * b_ * d * Vector::Ones(1).transpose(); + double x2 = x * x, d = x2 - y; + // Because linearized gradient is -A'b, it will multiply by d + if (H1) (*H1) = Vector1(4 * x / sqrt_2).transpose(); + if (H2) (*H2) = Vector1(-2 / sqrt_2).transpose(); return Vector1(sqrt_2 * d); } }; @@ -156,7 +155,7 @@ static NonlinearFactorGraph GetRosenbrockGraph(double a = 1.0, NonlinearFactorGraph graph; graph.emplace_shared(X(0), a, noiseModel::Unit::Create(1)); graph.emplace_shared( - X(0), Y(0), b, noiseModel::Isotropic::Precision(1, b)); + X(0), Y(0), noiseModel::Isotropic::Precision(1, b)); return graph; } @@ -185,12 +184,12 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { using namespace rosenbrock; double a = 1.0, b = 100.0; Rosenbrock1Factor f1(X(0), a, noiseModel::Unit::Create(1)); - Rosenbrock2Factor f2(X(0), Y(0), b, noiseModel::Isotropic::Sigma(1, b)); + Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Sigma(1, b)); Values values; values.insert(X(0), 0.0); values.insert(Y(0), 0.0); - // EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); - // EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); + EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); + EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); std::mt19937 rng(42); std::uniform_real_distribution dist(0.0, 100.0); @@ -208,7 +207,7 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { TEST(NonlinearConjugateGradientOptimizer, Optimization) { using namespace rosenbrock; - double a = 1; + double a = 7; auto graph = GetRosenbrockGraph(a); // Assert that error at global minimum is 0. 
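The Jacobian corrections in these test patches can be cross-checked against the analytic gradient of the Rosenbrock function f(x, y) = (a - x)^2 + b(y - x^2)^2, namely df/dx = -2(a - x) - 4bx(y - x^2) and df/dy = 2b(y - x^2). Below is a self-contained finite-difference check in the spirit of EXPECT_CORRECT_FACTOR_JACOBIANS; plain C++ without GTSAM types, illustrative only:

#include <cstdio>

// Rosenbrock Banana function, matching rosenbrock_func in the test above.
double f(double x, double y, double a, double b) {
  return (a - x) * (a - x) + b * (y - x * x) * (y - x * x);
}

int main() {
  const double a = 1.0, b = 100.0, x = 3.0, y = 5.0, h = 1e-6;
  // Analytic partial derivatives of f.
  const double dfdx = -2.0 * (a - x) - 4.0 * b * x * (y - x * x);
  const double dfdy = 2.0 * b * (y - x * x);
  // Central finite differences for comparison.
  const double num_dx = (f(x + h, y, a, b) - f(x - h, y, a, b)) / (2.0 * h);
  const double num_dy = (f(x, y + h, a, b) - f(x, y - h, a, b)) / (2.0 * h);
  std::printf("df/dx: analytic %g vs numeric %g\n", dfdx, num_dx);
  std::printf("df/dy: analytic %g vs numeric %g\n", dfdy, num_dy);
  return 0;
}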
@@ -216,21 +215,20 @@ TEST(NonlinearConjugateGradientOptimizer, Optimization) { EXPECT_DOUBLES_EQUAL(0.0, error, 1e-12); NonlinearOptimizerParams param; - param.maxIterations = 16; - param.verbosity = NonlinearOptimizerParams::LINEAR; - // param.verbosity = NonlinearOptimizerParams::SILENT; + param.maxIterations = 50; + // param.verbosity = NonlinearOptimizerParams::LINEAR; + param.verbosity = NonlinearOptimizerParams::SILENT; + double x = 3.0, y = 5.0; Values initialEstimate; - initialEstimate.insert(X(0), 0.0); - initialEstimate.insert(Y(0), 0.0); + initialEstimate.insert(X(0), x); + initialEstimate.insert(Y(0), y); - // std::cout << f(graph, 0, 0) << std::endl; + std::cout << f(graph, x, y) << std::endl; GaussianFactorGraph::shared_ptr linear = graph.linearize(initialEstimate); - linear->print(); + // linear->print(); linear->gradientAtZero().print("Gradient: "); - // std::cout << f(graph, 11.0, 90.0) << std::endl; - NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param); Values result = optimizer.optimize(); result.print(); From 8a0bbe9666ef486d91a5d8b2f39080937e3e6188 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 13:28:06 -0400 Subject: [PATCH 140/204] test optimization with Rosenbrock function --- .../testNonlinearConjugateGradientOptimizer.cpp | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 1e16c6509..76edd3756 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -207,7 +207,7 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { TEST(NonlinearConjugateGradientOptimizer, Optimization) { using namespace rosenbrock; - double a = 7; + double a = 12; auto graph = GetRosenbrockGraph(a); // Assert that error at global minimum is 0. 
@@ -215,7 +215,7 @@ TEST(NonlinearConjugateGradientOptimizer, Optimization) { EXPECT_DOUBLES_EQUAL(0.0, error, 1e-12); NonlinearOptimizerParams param; - param.maxIterations = 50; + param.maxIterations = 350; // param.verbosity = NonlinearOptimizerParams::LINEAR; param.verbosity = NonlinearOptimizerParams::SILENT; @@ -224,15 +224,20 @@ TEST(NonlinearConjugateGradientOptimizer, Optimization) { initialEstimate.insert(X(0), x); initialEstimate.insert(Y(0), y); - std::cout << f(graph, x, y) << std::endl; GaussianFactorGraph::shared_ptr linear = graph.linearize(initialEstimate); + // std::cout << "error: " << f(graph, x, y) << std::endl; // linear->print(); - linear->gradientAtZero().print("Gradient: "); + // linear->gradientAtZero().print("Gradient: "); NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param); Values result = optimizer.optimize(); - result.print(); - cout << "cg final error = " << graph.error(result) << endl; + // result.print(); + // cout << "cg final error = " << graph.error(result) << endl; + + Values expected; + expected.insert(X(0), a); + expected.insert(Y(0), a * a); + EXPECT(assert_equal(expected, result, 1e-1)); } /* ************************************************************************* */ From 631fa2b564ca703e5e6887955a443da59c75602f Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 13:38:11 -0400 Subject: [PATCH 141/204] minor improvements --- .../tests/testNonlinearConjugateGradientOptimizer.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 76edd3756..9681caff0 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -19,6 +19,9 @@ using namespace std; using namespace gtsam; +using symbol_shorthand::X; +using symbol_shorthand::Y; + // Generate a small PoseSLAM problem std::tuple generateProblem() { // 1. 
Create graph container and add factors to it @@ -84,12 +87,6 @@ TEST(NonlinearConjugateGradientOptimizer, Optimize) { namespace rosenbrock { -/// Alias for the first term in the Rosenbrock function -// using Rosenbrock1Factor = PriorFactor; - -using symbol_shorthand::X; -using symbol_shorthand::Y; - constexpr double sqrt_2 = 1.4142135623730951; class Rosenbrock1Factor : public NoiseModelFactorN { From 58ae5c6d08decc28ab7466a89de96567ede6c3bc Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 18:27:39 -0400 Subject: [PATCH 142/204] fix test --- .../nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 9681caff0..4dfa7d912 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -181,7 +181,7 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { using namespace rosenbrock; double a = 1.0, b = 100.0; Rosenbrock1Factor f1(X(0), a, noiseModel::Unit::Create(1)); - Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Sigma(1, b)); + Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Precision(1, b)); Values values; values.insert(X(0), 0.0); values.insert(Y(0), 0.0); From b5b5e1544398acebd9ff1c963d078e8eec4d5950 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 18:48:53 -0400 Subject: [PATCH 143/204] move 2 to the Precision matrix and check if error is correct --- ...estNonlinearConjugateGradientOptimizer.cpp | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 4dfa7d912..ad6cc76bf 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -69,7 +69,7 @@ std::tuple generateProblem() { } /* ************************************************************************* */ -TEST(NonlinearConjugateGradientOptimizer, Optimize) { +TEST_DISABLED(NonlinearConjugateGradientOptimizer, Optimize) { const auto [graph, initialEstimate] = generateProblem(); // cout << "initial error = " << graph.error(initialEstimate) << endl; @@ -106,7 +106,7 @@ class Rosenbrock1Factor : public NoiseModelFactorN { double d = x - a_; // Because linearized gradient is -A'b, it will multiply by d if (H) (*H) = Vector1(2 / sqrt_2).transpose(); - return Vector1(sqrt_2 * d); + return Vector1(d); } }; @@ -133,7 +133,7 @@ class Rosenbrock2Factor : public NoiseModelFactorN { // Because linearized gradient is -A'b, it will multiply by d if (H1) (*H1) = Vector1(4 * x / sqrt_2).transpose(); if (H2) (*H2) = Vector1(-2 / sqrt_2).transpose(); - return Vector1(sqrt_2 * d); + return Vector1(d); } }; @@ -150,9 +150,10 @@ class Rosenbrock2Factor : public NoiseModelFactorN { static NonlinearFactorGraph GetRosenbrockGraph(double a = 1.0, double b = 100.0) { NonlinearFactorGraph graph; - graph.emplace_shared(X(0), a, noiseModel::Unit::Create(1)); + graph.emplace_shared( + X(0), a, noiseModel::Isotropic::Precision(1, 2)); graph.emplace_shared( - X(0), Y(0), noiseModel::Isotropic::Precision(1, b)); + X(0), Y(0), noiseModel::Isotropic::Precision(1, 2 * b)); return graph; } @@ -180,13 +181,13 @@ double rosenbrock_func(double x, double y, 
double a = 1.0, double b = 100.0) { TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { using namespace rosenbrock; double a = 1.0, b = 100.0; - Rosenbrock1Factor f1(X(0), a, noiseModel::Unit::Create(1)); - Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Precision(1, b)); + Rosenbrock1Factor f1(X(0), a, noiseModel::Isotropic::Precision(1, 2)); + Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Precision(1, 2 * b)); Values values; - values.insert(X(0), 0.0); - values.insert(Y(0), 0.0); - EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); - EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); + values.insert(X(0), 3.0); + values.insert(Y(0), 5.0); + // EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); + // EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); std::mt19937 rng(42); std::uniform_real_distribution dist(0.0, 100.0); @@ -201,7 +202,7 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { /* ************************************************************************* */ // Optimize the Rosenbrock function to verify optimizer works -TEST(NonlinearConjugateGradientOptimizer, Optimization) { +TEST_DISABLED(NonlinearConjugateGradientOptimizer, Optimization) { using namespace rosenbrock; double a = 12; From 6d6d39287e96aaa10704a00376611a9f0b588ab0 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 18:57:32 -0400 Subject: [PATCH 144/204] fix jacobians --- ...estNonlinearConjugateGradientOptimizer.cpp | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index ad6cc76bf..1742dc0cb 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -69,7 +69,7 @@ std::tuple generateProblem() { } /* ************************************************************************* */ -TEST_DISABLED(NonlinearConjugateGradientOptimizer, Optimize) { +TEST(NonlinearConjugateGradientOptimizer, Optimize) { const auto [graph, initialEstimate] = generateProblem(); // cout << "initial error = " << graph.error(initialEstimate) << endl; @@ -104,8 +104,8 @@ class Rosenbrock1Factor : public NoiseModelFactorN { /// evaluate error Vector evaluateError(const double& x, OptionalMatrixType H) const override { double d = x - a_; - // Because linearized gradient is -A'b, it will multiply by d - if (H) (*H) = Vector1(2 / sqrt_2).transpose(); + // Because linearized gradient is -A'b/sigma, it will multiply by d + if (H) (*H) = Vector1(1).transpose(); return Vector1(d); } }; @@ -130,9 +130,9 @@ class Rosenbrock2Factor : public NoiseModelFactorN { Vector evaluateError(const double& x, const double& y, OptionalMatrixType H1, OptionalMatrixType H2) const override { double x2 = x * x, d = x2 - y; - // Because linearized gradient is -A'b, it will multiply by d - if (H1) (*H1) = Vector1(4 * x / sqrt_2).transpose(); - if (H2) (*H2) = Vector1(-2 / sqrt_2).transpose(); + // Because linearized gradient is -A'b/sigma, it will multiply by d + if (H1) (*H1) = Vector1(2 * x).transpose(); + if (H2) (*H2) = Vector1(-1).transpose(); return Vector1(d); } }; @@ -181,13 +181,16 @@ double rosenbrock_func(double x, double y, double a = 1.0, double b = 100.0) { TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { using namespace rosenbrock; double a = 1.0, b = 100.0; - Rosenbrock1Factor f1(X(0), a, noiseModel::Isotropic::Precision(1, 
2)); - Rosenbrock2Factor f2(X(0), Y(0), noiseModel::Isotropic::Precision(1, 2 * b)); + auto graph = GetRosenbrockGraph(a, b); + Rosenbrock1Factor f1 = + *std::static_pointer_cast(graph.at(0)); + Rosenbrock2Factor f2 = + *std::static_pointer_cast(graph.at(1)); Values values; values.insert(X(0), 3.0); values.insert(Y(0), 5.0); - // EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); - // EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); + EXPECT_CORRECT_FACTOR_JACOBIANS(f1, values, 1e-7, 1e-5); + EXPECT_CORRECT_FACTOR_JACOBIANS(f2, values, 1e-7, 1e-5); std::mt19937 rng(42); std::uniform_real_distribution dist(0.0, 100.0); @@ -202,7 +205,7 @@ TEST(NonlinearConjugateGradientOptimizer, Rosenbrock) { /* ************************************************************************* */ // Optimize the Rosenbrock function to verify optimizer works -TEST_DISABLED(NonlinearConjugateGradientOptimizer, Optimization) { +TEST(NonlinearConjugateGradientOptimizer, Optimization) { using namespace rosenbrock; double a = 12; From 893e69df2906a8abcb0bdfd7842d0bdb574926a2 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Thu, 17 Oct 2024 18:57:48 -0400 Subject: [PATCH 145/204] remove sqrt definition --- .../nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp index 1742dc0cb..bd1febcc6 100644 --- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp @@ -87,8 +87,6 @@ TEST(NonlinearConjugateGradientOptimizer, Optimize) { namespace rosenbrock { -constexpr double sqrt_2 = 1.4142135623730951; - class Rosenbrock1Factor : public NoiseModelFactorN { private: typedef Rosenbrock1Factor This; From 2d4ee5057a9651bd3e9a3e392570ccb0d5d807ab Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Fri, 18 Oct 2024 10:30:01 -0400 Subject: [PATCH 146/204] template the directional methods --- .../NonlinearConjugateGradientOptimizer.cpp | 40 ----------------- .../NonlinearConjugateGradientOptimizer.h | 44 +++++++++++++++---- 2 files changed, 35 insertions(+), 49 deletions(-) diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp index 438eac7a8..8b8542779 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp @@ -28,46 +28,6 @@ namespace gtsam { typedef internal::NonlinearOptimizerState State; -/* ************************************************************************* */ -double FletcherReeves(const VectorValues& currentGradient, - const VectorValues& prevGradient) { - // Fletcher-Reeves: beta = g_n'*g_n/g_n-1'*g_n-1 - const double beta = std::max(0.0, currentGradient.dot(currentGradient) / - prevGradient.dot(prevGradient)); - return beta; -} - -/* ************************************************************************* */ -double PolakRibiere(const VectorValues& currentGradient, - const VectorValues& prevGradient) { - // Polak-Ribiere: beta = g_n'*(g_n-g_n-1)/g_n-1'*g_n-1 - const double beta = - std::max(0.0, currentGradient.dot(currentGradient - prevGradient) / - prevGradient.dot(prevGradient)); - return beta; -} - -/* ************************************************************************* */ -double HestenesStiefel(const VectorValues& currentGradient, - const VectorValues& prevGradient, - const 
VectorValues& direction) {
-  // Hestenes-Stiefel: beta = g_n'*(g_n-g_n-1)/(-s_n-1')*(g_n-g_n-1)
-  VectorValues d = currentGradient - prevGradient;
-  const double beta = std::max(0.0, currentGradient.dot(d) / -direction.dot(d));
-  return beta;
-}
-
-/* ************************************************************************* */
-double DaiYuan(const VectorValues& currentGradient,
-               const VectorValues& prevGradient,
-               const VectorValues& direction) {
-  // Dai-Yuan: beta = g_n'*g_n/(-s_n-1')*(g_n-g_n-1)
-  const double beta =
-      std::max(0.0, currentGradient.dot(currentGradient) /
-                        -direction.dot(currentGradient - prevGradient));
-  return beta;
-}
-
 /**
  * @brief Return the gradient vector of a nonlinear factor graph
  * @param nfg the graph
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 09b932d42..1aee33a72 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -24,22 +24,48 @@
 namespace gtsam {
 
 /// Fletcher-Reeves formula for computing β, the direction of steepest descent.
-double FletcherReeves(const VectorValues &currentGradient,
-                      const VectorValues &prevGradient);
+template <typename Gradient>
+double FletcherReeves(const Gradient &currentGradient,
+                      const Gradient &prevGradient) {
+  // Fletcher-Reeves: beta = g_n'*g_n/g_n-1'*g_n-1
+  const double beta =
+      currentGradient.dot(currentGradient) / prevGradient.dot(prevGradient);
+  return beta;
+}
 
 /// Polak-Ribiere formula for computing β, the direction of steepest descent.
-double PolakRibiere(const VectorValues &currentGradient,
-                    const VectorValues &prevGradient);
+template <typename Gradient>
+double PolakRibiere(const Gradient &currentGradient,
+                    const Gradient &prevGradient) {
+  // Polak-Ribiere: beta = g_n'*(g_n-g_n-1)/g_n-1'*g_n-1
+  const double beta =
+      std::max(0.0, currentGradient.dot(currentGradient - prevGradient) /
+                        prevGradient.dot(prevGradient));
+  return beta;
+}
 
 /// The Hestenes-Stiefel formula for computing β,
 /// the direction of steepest descent.
-double HestenesStiefel(const VectorValues &currentGradient,
-                       const VectorValues &prevGradient,
-                       const VectorValues &direction);
+template <typename Gradient>
+double HestenesStiefel(const Gradient &currentGradient,
+                       const Gradient &prevGradient,
+                       const Gradient &direction) {
+  // Hestenes-Stiefel: beta = g_n'*(g_n-g_n-1)/(-s_n-1')*(g_n-g_n-1)
+  VectorValues d = currentGradient - prevGradient;
+  const double beta = std::max(0.0, currentGradient.dot(d) / -direction.dot(d));
+  return beta;
+}
 
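 // A quick orientation on how a β formula is consumed, since only the formulas
 // themselves live in this header. A hedged sketch, assuming only that Gradient
 // (e.g. VectorValues) supports dot(), subtraction and scaling; the helper
 // names are illustrative, not GTSAM API:
 //   Gradient g = computeGradient(values);      // hypothetical helper
 //   double beta = PolakRibiere(g, gPrev);      // or any formula in this header
 //   direction = -1.0 * g + beta * direction;   // next conjugate direction
 //   gPrev = g;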
 /// The Dai-Yuan formula for computing β, the direction of steepest descent.
-double DaiYuan(const VectorValues &currentGradient,
-               const VectorValues &prevGradient, const VectorValues &direction);
+template <typename Gradient>
+double DaiYuan(const Gradient &currentGradient, const Gradient &prevGradient,
+               const VectorValues &direction) {
+  // Dai-Yuan: beta = g_n'*g_n/(-s_n-1')*(g_n-g_n-1)
+  const double beta =
+      std::max(0.0, currentGradient.dot(currentGradient) /
+                        -direction.dot(currentGradient - prevGradient));
+  return beta;
+}
 
 enum class DirectionMethod {
   FletcherReeves,

From 07b11bc9f1d4050666d22f453a777a7f60b9d00c Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sat, 19 Oct 2024 17:01:38 -0400
Subject: [PATCH 147/204] add test for different nonlinear CG direction methods

---
 ...estNonlinearConjugateGradientOptimizer.cpp | 43 +++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp
index 36673c7a0..f216e4a8c 100644
--- a/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/tests/testNonlinearConjugateGradientOptimizer.cpp
@@ -79,6 +79,49 @@ TEST(NonlinearConjugateGradientOptimizer, Optimize) {
   EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
 }
 
+/* ************************************************************************* */
+/// Test different direction methods
+TEST(NonlinearConjugateGradientOptimizer, DirectionMethods) {
+  const auto [graph, initialEstimate] = generateProblem();
+
+  NonlinearOptimizerParams param;
+  param.maxIterations =
+      500; /* requires a larger number of iterations to converge */
+  param.verbosity = NonlinearOptimizerParams::SILENT;
+
+  // Fletcher-Reeves
+  {
+    NonlinearConjugateGradientOptimizer optimizer(
+        graph, initialEstimate, param, DirectionMethod::FletcherReeves);
+    Values result = optimizer.optimize();
+
+    EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
+  }
+  // Polak-Ribiere
+  {
+    NonlinearConjugateGradientOptimizer optimizer(
+        graph, initialEstimate, param, DirectionMethod::PolakRibiere);
+    Values result = optimizer.optimize();
+
+    EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
+  }
+  // Hestenes-Stiefel
+  {
+    NonlinearConjugateGradientOptimizer optimizer(
+        graph, initialEstimate, param, DirectionMethod::HestenesStiefel);
+    Values result = optimizer.optimize();
+
+    EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
+  }
+  // Dai-Yuan
+  {
+    NonlinearConjugateGradientOptimizer optimizer(graph, initialEstimate, param,
+                                                  DirectionMethod::DaiYuan);
+    Values result = optimizer.optimize();
+
+    EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-4);
+  }
+}

 /* ************************************************************************* */
 int main() {
   TestResult tr;

From 7bb98946a27dfdfd933b0d384a292944173141a1 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sat, 19 Oct 2024 17:03:15 -0400
Subject: [PATCH 148/204] missed two templates

---
 gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 1aee33a72..bd106afbe 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -51,7 +51,7 @@ double HestenesStiefel(const Gradient &currentGradient,
                        const Gradient &prevGradient,
                        const Gradient &direction) {
   // Hestenes-Stiefel: beta = g_n'*(g_n-g_n-1)/(-s_n-1')*(g_n-g_n-1)
-  VectorValues d = currentGradient - prevGradient;
+  Gradient d = currentGradient - prevGradient;
   const double beta = std::max(0.0, currentGradient.dot(d) / -direction.dot(d));
   return beta;
 }
@@ -59,7 +59,7 @@ double HestenesStiefel(const Gradient &currentGradient,
 /// The Dai-Yuan formula for computing β, the direction of steepest descent.
 template <typename Gradient>
 double DaiYuan(const Gradient &currentGradient, const Gradient &prevGradient,
-               const VectorValues &direction) {
+               const Gradient &direction) {
   // Dai-Yuan: beta = g_n'*g_n/(-s_n-1')*(g_n-g_n-1)
   const double beta =
       std::max(0.0, currentGradient.dot(currentGradient) /
                         -direction.dot(currentGradient - prevGradient));
   return beta;
 }

From b56595c6f804928bffcd1bd34ffc3282f7df4c9c Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Tue, 8 Oct 2024 18:10:27 +0900
Subject: [PATCH 149/204] Get rid of double storage

---
 gtsam/hybrid/HybridGaussianConditional.cpp | 67 +++++++++++-----------
 gtsam/hybrid/HybridGaussianConditional.h   |  7 +--
 2 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index 58724163e..8ab00fb67 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -45,7 +45,6 @@ namespace gtsam {
 struct HybridGaussianConditional::Helper {
   std::optional<size_t> nrFrontals;
   FactorValuePairs pairs;
-  Conditionals conditionals;
   double minNegLogConstant;
 
   using GC = GaussianConditional;
@@ -70,14 +69,12 @@ struct HybridGaussianConditional::Helper {
       gcs.push_back(gaussianConditional);
     }
 
-    conditionals = Conditionals({mode}, gcs);
     pairs = FactorValuePairs({mode}, fvs);
   }
 
   /// Construct from tree of GaussianConditionals.
   explicit Helper(const Conditionals &conditionals)
-      : conditionals(conditionals),
-        minNegLogConstant(std::numeric_limits<double>::infinity()) {
+      : minNegLogConstant(std::numeric_limits<double>::infinity()) {
     auto func = [this](const GC::shared_ptr &gc) -> GaussianFactorValuePair {
       if (!gc) return {nullptr, std::numeric_limits<double>::infinity()};
       if (!nrFrontals) nrFrontals = gc->nrFrontals();
@@ -106,7 +103,6 @@ HybridGaussianConditional::HybridGaussianConditional(
                            pair.second - helper.minNegLogConstant};
                      })),
       BaseConditional(*helper.nrFrontals),
-      conditionals_(helper.conditionals),
       negLogConstant_(helper.minNegLogConstant) {}
 
 HybridGaussianConditional::HybridGaussianConditional(
@@ -143,24 +139,26 @@ HybridGaussianConditional::HybridGaussianConditional(
     : HybridGaussianConditional(discreteParents, Helper(conditionals)) {}
 
 /* *******************************************************************************/
-const HybridGaussianConditional::Conditionals &
+const HybridGaussianConditional::Conditionals
 HybridGaussianConditional::conditionals() const {
-  return conditionals_;
+  return Conditionals(factors(), [](const auto& pair) {
+    return std::dynamic_pointer_cast<GaussianConditional>(pair.first);
+  });
 }
 
 /* *******************************************************************************/
 size_t HybridGaussianConditional::nrComponents() const {
   size_t total = 0;
-  conditionals_.visit([&total](const GaussianFactor::shared_ptr &node) {
-    if (node) total += 1;
+  factors().visit([&total](const auto& node) {
+    if (node.first) total += 1;
   });
   return total;
 }
 
 /* *******************************************************************************/
 GaussianConditional::shared_ptr HybridGaussianConditional::choose(
-    const DiscreteValues &discreteValues) const {
-  auto &ptr = conditionals_(discreteValues);
+    const DiscreteValues& discreteValues) const {
+  auto& [ptr, _] = factors()(discreteValues);
   if (!ptr) return nullptr;
   auto conditional = std::dynamic_pointer_cast<GaussianConditional>(ptr);
   if
(conditional) @@ -176,18 +174,15 @@ bool HybridGaussianConditional::equals(const HybridFactor &lf, const This *e = dynamic_cast(&lf); if (e == nullptr) return false; - // This will return false if either conditionals_ is empty or e->conditionals_ - // is empty, but not if both are empty or both are not empty: - if (conditionals_.empty() ^ e->conditionals_.empty()) return false; - - // Check the base and the factors: - return BaseFactor::equals(*e, tol) && - conditionals_.equals(e->conditionals_, - [tol](const GaussianConditional::shared_ptr &f1, - const GaussianConditional::shared_ptr &f2) { - return (!f1 && !f2) || - (f1 && f2 && f1->equals(*f2, tol)); - }); + // Factors existence and scalar values are checked in BaseFactor::equals. + // Here we check additionally that the factors *are* conditionals and are equal. + auto compareFunc = [tol](const GaussianFactorValuePair& pair1, + const GaussianFactorValuePair& pair2) { + auto c1 = std::dynamic_pointer_cast(pair1.first), + c2 = std::dynamic_pointer_cast(pair2.first); + return (!c1 && !c2) || (c1 && c2 && c1->equals(*c2, tol)); + }; + return Base::equals(*e, tol) && factors().equals(e->factors(), compareFunc); } /* *******************************************************************************/ @@ -202,7 +197,7 @@ void HybridGaussianConditional::print(const std::string &s, std::cout << std::endl << " logNormalizationConstant: " << -negLogConstant() << std::endl << std::endl; - conditionals_.print( + conditionals().print( "", [&](Key k) { return formatter(k); }, [&](const GaussianConditional::shared_ptr &gf) -> std::string { RedirectCout rd; @@ -254,7 +249,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( const DiscreteKeys discreteParentKeys = discreteKeys(); const KeyVector continuousParentKeys = continuousParents(); const HybridGaussianFactor::FactorValuePairs likelihoods( - conditionals_, + conditionals(), [&](const GaussianConditional::shared_ptr &conditional) -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); @@ -294,22 +289,30 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( return (max->evaluate(choices) == 0.0) ? 
nullptr : conditional; }; - auto pruned_conditionals = conditionals_.apply(pruner); + auto pruned_conditionals = conditionals().apply(pruner); return std::make_shared(discreteKeys(), pruned_conditionals); } /* *******************************************************************************/ double HybridGaussianConditional::logProbability( - const HybridValues &values) const { - auto conditional = conditionals_(values.discrete()); - return conditional->logProbability(values.continuous()); + const HybridValues& values) const { + auto [factor, _] = factors()(values.discrete()); + if (auto conditional = std::dynamic_pointer_cast(factor)) + return conditional->logProbability(values.continuous()); + else + throw std::logic_error( + "A HybridGaussianConditional unexpectedly contained a non-conditional"); } /* *******************************************************************************/ -double HybridGaussianConditional::evaluate(const HybridValues &values) const { - auto conditional = conditionals_(values.discrete()); - return conditional->evaluate(values.continuous()); +double HybridGaussianConditional::evaluate(const HybridValues& values) const { + auto [factor, _] = factors()(values.discrete()); + if (auto conditional = std::dynamic_pointer_cast(factor)) + return conditional->evaluate(values.continuous()); + else + throw std::logic_error( + "A HybridGaussianConditional unexpectedly contained a non-conditional"); } } // namespace gtsam diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 4cc3d3196..6e0d2800c 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -64,8 +64,6 @@ class GTSAM_EXPORT HybridGaussianConditional using Conditionals = DecisionTree; private: - Conditionals conditionals_; ///< a decision tree of Gaussian conditionals. - ///< Negative-log of the normalization constant (log(\sqrt(|2πΣ|))). ///< Take advantage of the neg-log space so everything is a minimization double negLogConstant_; @@ -192,8 +190,8 @@ class GTSAM_EXPORT HybridGaussianConditional std::shared_ptr likelihood( const VectorValues &given) const; - /// Getter for the underlying Conditionals DecisionTree - const Conditionals &conditionals() const; + /// Get Conditionals DecisionTree (dynamic cast from factors) + const Conditionals conditionals() const; /** * @brief Compute the logProbability of this hybrid Gaussian conditional. 
@@ -241,7 +239,6 @@ class GTSAM_EXPORT HybridGaussianConditional void serialize(Archive &ar, const unsigned int /*version*/) { ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseFactor); ar &BOOST_SERIALIZATION_BASE_OBJECT_NVP(BaseConditional); - ar &BOOST_SERIALIZATION_NVP(conditionals_); } #endif }; From 6c9b25c45eb96be682a9300acefaede8623d0bf4 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 15 Oct 2024 14:34:42 +0900 Subject: [PATCH 150/204] DT::split --- gtsam/discrete/DecisionTree-inl.h | 25 ++++++++++++++--- gtsam/discrete/DecisionTree.h | 22 +++++++++++---- gtsam/discrete/tests/testDecisionTree.cpp | 33 ++++++++++++++++++++++- 3 files changed, 71 insertions(+), 9 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 4266ace15..434a6beac 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -482,8 +483,8 @@ namespace gtsam { /****************************************************************************/ // DecisionTree /****************************************************************************/ - template - DecisionTree::DecisionTree() {} + template + DecisionTree::DecisionTree() : root_(nullptr) {} template DecisionTree::DecisionTree(const NodePtr& root) : @@ -951,11 +952,16 @@ namespace gtsam { return root_->equals(*other.root_); } + /****************************************************************************/ template const Y& DecisionTree::operator()(const Assignment& x) const { + if (root_ == nullptr) + throw std::invalid_argument( + "DecisionTree::operator() called on empty tree"); return root_->operator ()(x); } + /****************************************************************************/ template DecisionTree DecisionTree::apply(const Unary& op) const { // It is unclear what should happen if tree is empty: @@ -966,6 +972,7 @@ namespace gtsam { return DecisionTree(root_->apply(op)); } + /****************************************************************************/ /// Apply unary operator with assignment template DecisionTree DecisionTree::apply( @@ -1049,6 +1056,18 @@ namespace gtsam { return ss.str(); } -/******************************************************************************/ + /******************************************************************************/ + template + template + std::pair, DecisionTree> DecisionTree::split( + std::function(const Y&)> AB_of_Y) { + using AB = std::pair; + const DecisionTree ab(*this, AB_of_Y); + const DecisionTree a(ab, [](const AB& p) { return p.first; }); + const DecisionTree b(ab, [](const AB& p) { return p.second; }); + return {a, b}; + } + + /******************************************************************************/ } // namespace gtsam diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 6d8d86530..6a966e0dd 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -156,10 +156,10 @@ namespace gtsam { template static NodePtr build(It begin, It end, ValueIt beginY, ValueIt endY); - /** Internal helper function to create from - * keys, cardinalities, and Y values. - * Calls `build` which builds thetree bottom-up, - * before we prune in a top-down fashion. + /** + * Internal helper function to create a tree from keys, cardinalities, and Y + * values. Calls `build` which builds the tree bottom-up, before we prune in + * a top-down fashion. 
*/ template static NodePtr create(It begin, It end, ValueIt beginY, ValueIt endY); @@ -239,7 +239,7 @@ namespace gtsam { DecisionTree(const DecisionTree& other, Func Y_of_X); /** - * @brief Convert from a different value type X to value type Y, also transate + * @brief Convert from a different value type X to value type Y, also translate * labels via map from type M to L. * * @tparam M Previous label type. @@ -406,6 +406,18 @@ namespace gtsam { const ValueFormatter& valueFormatter, bool showZero = true) const; + /** + * @brief Convert into two trees with value types A and B. + * + * @tparam A First new value type. + * @tparam B Second new value type. + * @param AB_of_Y Functor to convert from type X to std::pair. + * @return A pair of DecisionTrees with value types A and B respectively. + */ + template + std::pair, DecisionTree> split( + std::function(const Y&)> AB_of_Y); + /// @name Advanced Interface /// @{ diff --git a/gtsam/discrete/tests/testDecisionTree.cpp b/gtsam/discrete/tests/testDecisionTree.cpp index c625e1ba6..1382fc704 100644 --- a/gtsam/discrete/tests/testDecisionTree.cpp +++ b/gtsam/discrete/tests/testDecisionTree.cpp @@ -11,7 +11,7 @@ /* * @file testDecisionTree.cpp - * @brief Develop DecisionTree + * @brief DecisionTree unit tests * @author Frank Dellaert * @author Can Erdogan * @date Jan 30, 2012 @@ -271,6 +271,37 @@ TEST(DecisionTree, Example) { DOT(acnotb); } +/* ************************************************************************** */ +// Test that we can create two trees out of one, using a function that returns a pair. +TEST(DecisionTree, Split) { + // Create labels + string A("A"), B("B"); + + // Create a decision tree + DT original(A, DT(B, 1, 2), DT(B, 3, 4)); + + // Define a function that returns an int/bool pair + auto split_function = [](const int& value) -> std::pair { + return {value*3, value*3 % 2 == 0}; + }; + + // Split the original tree into two new trees + auto [la,lb] = original.split(split_function); + + // Check the first resulting tree + EXPECT_LONGS_EQUAL(3, la(Assignment{{A, 0}, {B, 0}})); + EXPECT_LONGS_EQUAL(6, la(Assignment{{A, 0}, {B, 1}})); + EXPECT_LONGS_EQUAL(9, la(Assignment{{A, 1}, {B, 0}})); + EXPECT_LONGS_EQUAL(12, la(Assignment{{A, 1}, {B, 1}})); + + // Check the second resulting tree + EXPECT(!lb(Assignment{{A, 0}, {B, 0}})); + EXPECT(lb(Assignment{{A, 0}, {B, 1}})); + EXPECT(!lb(Assignment{{A, 1}, {B, 0}})); + EXPECT(lb(Assignment{{A, 1}, {B, 1}})); +} + + /* ************************************************************************** */ // test Conversion of values bool bool_of_int(const int& y) { return y != 0; }; From 1fe09f5e09a1c68e0461f41167c4f2d897718de0 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 16 Oct 2024 19:53:36 +0900 Subject: [PATCH 151/204] Avoid using slow conditionals() --- gtsam/hybrid/HybridGaussianConditional.cpp | 42 +++++++++++++--------- gtsam/hybrid/HybridGaussianConditional.h | 9 +++++ 2 files changed, 35 insertions(+), 16 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 8ab00fb67..6e5fe93c4 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -197,11 +197,11 @@ void HybridGaussianConditional::print(const std::string &s, std::cout << std::endl << " logNormalizationConstant: " << -negLogConstant() << std::endl << std::endl; - conditionals().print( + factors().print( "", [&](Key k) { return formatter(k); }, - [&](const GaussianConditional::shared_ptr &gf) -> std::string { 
+ [&](const GaussianFactorValuePair &pair) -> std::string { RedirectCout rd; - if (gf && !gf->empty()) { + if (auto gf = std::dynamic_pointer_cast(pair.first)) { gf->print("", formatter); return rd.str(); } else { @@ -249,12 +249,18 @@ std::shared_ptr HybridGaussianConditional::likelihood( const DiscreteKeys discreteParentKeys = discreteKeys(); const KeyVector continuousParentKeys = continuousParents(); const HybridGaussianFactor::FactorValuePairs likelihoods( - conditionals(), - [&](const GaussianConditional::shared_ptr &conditional) - -> GaussianFactorValuePair { - const auto likelihood_m = conditional->likelihood(given); - const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_; - return {likelihood_m, Cgm_Kgcm}; + factors(), + [&](const GaussianFactorValuePair &pair) -> GaussianFactorValuePair { + if (auto conditional = + std::dynamic_pointer_cast(pair.first)) { + const auto likelihood_m = conditional->likelihood(given); + // scalar is already correct. + assert(pair.second == + conditional->negLogConstant() - negLogConstant_); + return {likelihood_m, pair.second}; + } else { + return {nullptr, std::numeric_limits::infinity()}; + } }); return std::make_shared(discreteParentKeys, likelihoods); @@ -283,15 +289,19 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( // Check the max value for every combination of our keys. // If the max value is 0.0, we can prune the corresponding conditional. - auto pruner = [&](const Assignment &choices, - const GaussianConditional::shared_ptr &conditional) - -> GaussianConditional::shared_ptr { - return (max->evaluate(choices) == 0.0) ? nullptr : conditional; + auto pruner = + [&](const Assignment &choices, + const GaussianFactorValuePair &pair) -> GaussianFactorValuePair { + if (max->evaluate(choices) == 0.0) + return {nullptr, std::numeric_limits::infinity()}; + else + return pair; }; - auto pruned_conditionals = conditionals().apply(pruner); - return std::make_shared(discreteKeys(), - pruned_conditionals); + FactorValuePairs prunedConditionals = factors().apply(pruner); + return std::shared_ptr( + new HybridGaussianConditional(discreteKeys(), nrFrontals_, + prunedConditionals, negLogConstant_)); } /* *******************************************************************************/ diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 6e0d2800c..38b7b9795 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -191,6 +191,7 @@ class GTSAM_EXPORT HybridGaussianConditional const VectorValues &given) const; /// Get Conditionals DecisionTree (dynamic cast from factors) + /// @note Slow: avoid using in favor of factors(), which uses existing tree. const Conditionals conditionals() const; /** @@ -229,6 +230,14 @@ class GTSAM_EXPORT HybridGaussianConditional HybridGaussianConditional(const DiscreteKeys &discreteParents, const Helper &helper); + /// Private constructor used when constants have already been calculated. + HybridGaussianConditional(const DiscreteKeys &discreteKeys, int nrFrontals, + const FactorValuePairs &factors, + double negLogConstant) + : BaseFactor(discreteKeys, factors), + BaseConditional(nrFrontals), + negLogConstant_(negLogConstant) {} + /// Check whether `given` has values for all frontal keys. 
bool allFrontalsGiven(const VectorValues &given) const; From 6592f8a8b4956dd74d5723e520d5a9595cc2553d Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 17 Oct 2024 02:05:56 +0900 Subject: [PATCH 152/204] Make sure we still subtract min() --- gtsam/hybrid/HybridGaussianConditional.cpp | 38 +++++++++++++++++----- gtsam/hybrid/HybridGaussianConditional.h | 21 +++++++----- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 6e5fe93c4..76a09bcf5 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -29,6 +29,8 @@ #include #include +#include +#include "gtsam/linear/GaussianConditional.h" namespace gtsam { /* *******************************************************************************/ @@ -38,14 +40,13 @@ namespace gtsam { * This struct contains the following fields: * - nrFrontals: Optional size_t for number of frontal variables * - pairs: FactorValuePairs for storing conditionals with their negLogConstant - * - conditionals: Conditionals for storing conditionals. TODO(frank): kill! * - minNegLogConstant: minimum negLogConstant, computed here, subtracted in * constructor */ struct HybridGaussianConditional::Helper { - std::optional nrFrontals; FactorValuePairs pairs; - double minNegLogConstant; + std::optional nrFrontals = {}; + double minNegLogConstant = std::numeric_limits::infinity(); using GC = GaussianConditional; using P = std::vector>; @@ -54,8 +55,6 @@ struct HybridGaussianConditional::Helper { template explicit Helper(const DiscreteKey &mode, const P &p, Args &&...args) { nrFrontals = 1; - minNegLogConstant = std::numeric_limits::infinity(); - std::vector fvs; std::vector gcs; fvs.reserve(p.size()); @@ -73,8 +72,7 @@ struct HybridGaussianConditional::Helper { } /// Construct from tree of GaussianConditionals. - explicit Helper(const Conditionals &conditionals) - : minNegLogConstant(std::numeric_limits::infinity()) { + explicit Helper(const Conditionals &conditionals) { auto func = [this](const GC::shared_ptr &gc) -> GaussianFactorValuePair { if (!gc) return {nullptr, std::numeric_limits::infinity()}; if (!nrFrontals) nrFrontals = gc->nrFrontals(); @@ -89,6 +87,25 @@ struct HybridGaussianConditional::Helper { "Provided conditionals do not contain any frontal variables."); } } + + /// Construct from tree of factor/scalar pairs. + explicit Helper(const FactorValuePairs &pairs) : pairs(pairs) { + auto func = [this](const GaussianFactorValuePair &pair) { + if (!pair.first) return; + auto gc = std::dynamic_pointer_cast(pair.first); + if (!gc) + throw std::runtime_error( + "HybridGaussianConditional called with non-conditional."); + if (!nrFrontals) nrFrontals = gc->nrFrontals(); + minNegLogConstant = std::min(minNegLogConstant, pair.second); + }; + pairs.visit(func); + if (!nrFrontals.has_value()) { + throw std::runtime_error( + "HybridGaussianConditional: need at least one frontal variable. 
" + "Provided conditionals do not contain any frontal variables."); + } + } }; /* *******************************************************************************/ @@ -138,6 +155,10 @@ HybridGaussianConditional::HybridGaussianConditional( const HybridGaussianConditional::Conditionals &conditionals) : HybridGaussianConditional(discreteParents, Helper(conditionals)) {} +HybridGaussianConditional::HybridGaussianConditional( + const DiscreteKeys &discreteParents, const FactorValuePairs &pairs) + : HybridGaussianConditional(discreteParents, Helper(pairs)) {} + /* *******************************************************************************/ const HybridGaussianConditional::Conditionals HybridGaussianConditional::conditionals() const { @@ -300,8 +321,7 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( FactorValuePairs prunedConditionals = factors().apply(pruner); return std::shared_ptr( - new HybridGaussianConditional(discreteKeys(), nrFrontals_, - prunedConditionals, negLogConstant_)); + new HybridGaussianConditional(discreteKeys(), prunedConditionals)); } /* *******************************************************************************/ diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 38b7b9795..c38d8733c 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -141,6 +141,19 @@ class GTSAM_EXPORT HybridGaussianConditional HybridGaussianConditional(const DiscreteKeys &discreteParents, const Conditionals &conditionals); + /** + * @brief Construct from multiple discrete keys M and a tree of + * factor/scalar pairs, where the scalar is assumed to be the + * the negative log constant for each assignment m, up to a constant. + * + * @note Will throw if factors are not actually conditionals. + * + * @param discreteParents the discrete parents. Will be placed last. + * @param conditionalPairs Decision tree of GaussianFactor/scalar pairs. + */ + HybridGaussianConditional(const DiscreteKeys &discreteParents, + const FactorValuePairs &pairs); + /// @} /// @name Testable /// @{ @@ -230,14 +243,6 @@ class GTSAM_EXPORT HybridGaussianConditional HybridGaussianConditional(const DiscreteKeys &discreteParents, const Helper &helper); - /// Private constructor used when constants have already been calculated. - HybridGaussianConditional(const DiscreteKeys &discreteKeys, int nrFrontals, - const FactorValuePairs &factors, - double negLogConstant) - : BaseFactor(discreteKeys, factors), - BaseConditional(nrFrontals), - negLogConstant_(negLogConstant) {} - /// Check whether `given` has values for all frontal keys. 
bool allFrontalsGiven(const VectorValues &given) const; From a19fb28fab2fabc0a47f1fe49d97e2ed42cd4754 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 17 Oct 2024 02:06:10 +0900 Subject: [PATCH 153/204] Const correctness --- gtsam/discrete/DecisionTree-inl.h | 2 +- gtsam/discrete/DecisionTree.h | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 434a6beac..1f50014d6 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -1060,7 +1060,7 @@ namespace gtsam { template template std::pair, DecisionTree> DecisionTree::split( - std::function(const Y&)> AB_of_Y) { + std::function(const Y&)> AB_of_Y) const { using AB = std::pair; const DecisionTree ab(*this, AB_of_Y); const DecisionTree a(ab, [](const AB& p) { return p.first; }); diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index 6a966e0dd..aba5b88b7 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -416,7 +416,7 @@ namespace gtsam { */ template std::pair, DecisionTree> split( - std::function(const Y&)> AB_of_Y); + std::function(const Y&)> AB_of_Y) const; /// @name Advanced Interface /// @{ diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 8e6123f10..361cb6e81 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -390,7 +390,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { }; // Perform elimination! - ResultTree eliminationResults(prunedProductFactor, eliminate); + const ResultTree eliminationResults(prunedProductFactor, eliminate); // If there are no more continuous parents we create a DiscreteFactor with the // error for each discrete choice. 
Otherwise, create a HybridGaussianFactor From 08167d08cc9230ed5615a623fabea6263ae16787 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 16 Oct 2024 14:41:22 -0700 Subject: [PATCH 154/204] Move constructor --- gtsam/discrete/DecisionTree-inl.h | 38 +++++++++++++++++++++-- gtsam/discrete/DecisionTree.h | 11 ++++++- gtsam/discrete/tests/testDecisionTree.cpp | 22 +++++++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 1f50014d6..bda44bb9d 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -287,6 +287,10 @@ namespace gtsam { return branches_; } + std::vector& branches() { + return branches_; + } + /** add a branch: TODO merge into constructor */ void push_back(NodePtr&& node) { // allSame_ is restricted to leaf nodes in a decision tree @@ -555,6 +559,36 @@ namespace gtsam { root_ = compose(functions.begin(), functions.end(), label); } + /****************************************************************************/ + template + DecisionTree::DecisionTree(const Unary& op, + DecisionTree&& other) noexcept + : root_(std::move(other.root_)) { + // Apply the unary operation directly to each leaf in the tree + if (root_) { + // Define a helper function to traverse and apply the operation + struct ApplyUnary { + const Unary& op; + void operator()(typename DecisionTree::NodePtr& node) const { + if (auto leaf = std::dynamic_pointer_cast(node)) { + // Apply the unary operation to the leaf's constant value + leaf->constant_ = op(leaf->constant_); + } else if (auto choice = std::dynamic_pointer_cast(node)) { + // Recurse into the choice branches + for (NodePtr& branch : choice->branches()) { + (*this)(branch); + } + } + } + }; + + ApplyUnary applyUnary{op}; + applyUnary(root_); + } + // Reset the other tree's root to nullptr to avoid dangling references + other.root_ = nullptr; + } + /****************************************************************************/ template template @@ -695,7 +729,7 @@ namespace gtsam { typename DecisionTree::NodePtr DecisionTree::create( It begin, It end, ValueIt beginY, ValueIt endY) { auto node = build(begin, end, beginY, endY); - if (auto choice = std::dynamic_pointer_cast(node)) { + if (auto choice = std::dynamic_pointer_cast(node)) { return Choice::Unique(choice); } else { return node; @@ -711,7 +745,7 @@ namespace gtsam { // If leaf, apply unary conversion "op" and create a unique leaf. using LXLeaf = typename DecisionTree::Leaf; - if (auto leaf = std::dynamic_pointer_cast(f)) { + if (auto leaf = std::dynamic_pointer_cast(f)) { return NodePtr(new Leaf(Y_of_X(leaf->constant()))); } diff --git a/gtsam/discrete/DecisionTree.h b/gtsam/discrete/DecisionTree.h index aba5b88b7..486f798e9 100644 --- a/gtsam/discrete/DecisionTree.h +++ b/gtsam/discrete/DecisionTree.h @@ -85,7 +85,7 @@ namespace gtsam { /** ------------------------ Node base class --------------------------- */ struct Node { - using Ptr = std::shared_ptr; + using Ptr = std::shared_ptr; #ifdef DT_DEBUG_MEMORY static int nrNodes; @@ -228,6 +228,15 @@ namespace gtsam { DecisionTree(const L& label, const DecisionTree& f0, const DecisionTree& f1); + /** + * @brief Move constructor for DecisionTree. Very efficient as does not + * allocate anything, just changes in-place. But `other` is consumed. + * + * @param op The unary operation to apply to the moved DecisionTree. + * @param other The DecisionTree to move from, will be empty afterwards. 
+ */ + DecisionTree(const Unary& op, DecisionTree&& other) noexcept; + /** * @brief Convert from a different value type. * diff --git a/gtsam/discrete/tests/testDecisionTree.cpp b/gtsam/discrete/tests/testDecisionTree.cpp index 1382fc704..526001b51 100644 --- a/gtsam/discrete/tests/testDecisionTree.cpp +++ b/gtsam/discrete/tests/testDecisionTree.cpp @@ -108,6 +108,7 @@ struct DT : public DecisionTree { std::cout << s; Base::print("", keyFormatter, valueFormatter); } + /// Equality method customized to int node type bool equals(const Base& other, double tol = 1e-9) const { auto compare = [](const int& v, const int& w) { return v == w; }; @@ -302,6 +303,27 @@ TEST(DecisionTree, Split) { } +/* ************************************************************************** */ +// Test that we can create a tree by modifying an rvalue. +TEST(DecisionTree, Consume) { + // Create labels + string A("A"), B("B"); + + // Create a decision tree + DT original(A, DT(B, 1, 2), DT(B, 3, 4)); + + DT modified([](int i){return i*2;}, std::move(original)); + + // Check the first resulting tree + EXPECT_LONGS_EQUAL(2, modified(Assignment{{A, 0}, {B, 0}})); + EXPECT_LONGS_EQUAL(4, modified(Assignment{{A, 0}, {B, 1}})); + EXPECT_LONGS_EQUAL(6, modified(Assignment{{A, 1}, {B, 0}})); + EXPECT_LONGS_EQUAL(8, modified(Assignment{{A, 1}, {B, 1}})); + + // Check original was moved + EXPECT(original.root_ == nullptr); +} + /* ************************************************************************** */ // test Conversion of values bool bool_of_int(const int& y) { return y != 0; }; From 8d4233587c2141ced4f8284f2496c2c287dfe07d Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 16 Oct 2024 14:45:41 -0700 Subject: [PATCH 155/204] Use move constructor --- gtsam/hybrid/HybridGaussianConditional.cpp | 16 ++++++++-------- gtsam/hybrid/HybridGaussianConditional.h | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 76a09bcf5..a890b37b7 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -110,15 +110,15 @@ struct HybridGaussianConditional::Helper { /* *******************************************************************************/ HybridGaussianConditional::HybridGaussianConditional( - const DiscreteKeys &discreteParents, const Helper &helper) + const DiscreteKeys &discreteParents, Helper &&helper) : BaseFactor(discreteParents, - FactorValuePairs(helper.pairs, - [&](const GaussianFactorValuePair & - pair) { // subtract minNegLogConstant - return GaussianFactorValuePair{ - pair.first, - pair.second - helper.minNegLogConstant}; - })), + FactorValuePairs( + [&](const GaussianFactorValuePair + &pair) { // subtract minNegLogConstant + return GaussianFactorValuePair{ + pair.first, pair.second - helper.minNegLogConstant}; + }, + std::move(helper.pairs))), BaseConditional(*helper.nrFrontals), negLogConstant_(helper.minNegLogConstant) {} diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index c38d8733c..c485fafce 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -241,7 +241,7 @@ class GTSAM_EXPORT HybridGaussianConditional /// Private constructor that uses helper struct above. HybridGaussianConditional(const DiscreteKeys &discreteParents, - const Helper &helper); + Helper &&helper); /// Check whether `given` has values for all frontal keys. 
bool allFrontalsGiven(const VectorValues &given) const; From 1365a0904a5acda4ce33fc79c7207ec77beedc0e Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 17 Oct 2024 08:59:58 -0700 Subject: [PATCH 156/204] Avoid calculating negLogK twice --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 65 ++++++++++++---------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 361cb6e81..00bf7955e 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -57,10 +57,20 @@ template class EliminateableFactorGraph; using std::dynamic_pointer_cast; using OrphanWrapper = BayesTreeOrphanWrapper; -using Result = - std::pair, GaussianFactor::shared_ptr>; -using ResultValuePair = std::pair; -using ResultTree = DecisionTree; + +/// Result from elimination. +struct Result { + GaussianConditional::shared_ptr conditional; + double negLogK; + GaussianFactor::shared_ptr factor; + double scalar; + + bool operator==(const Result &other) const { + return conditional == other.conditional && negLogK == other.negLogK && + factor == other.factor && scalar == other.scalar; + } +}; +using ResultTree = DecisionTree; static const VectorValues kEmpty; @@ -294,17 +304,14 @@ discreteElimination(const HybridGaussianFactorGraph &factors, static std::shared_ptr createDiscreteFactor( const ResultTree &eliminationResults, const DiscreteKeys &discreteSeparator) { - auto calculateError = [&](const auto &pair) -> double { - const auto &[conditional, factor] = pair.first; - const double scalar = pair.second; - if (conditional && factor) { + auto calculateError = [&](const Result &result) -> double { + if (result.conditional && result.factor) { // `error` has the following contributions: // - the scalar is the sum of all mode-dependent constants // - factor->error(kempty) is the error remaining after elimination // - negLogK is what is given to the conditional to normalize - const double negLogK = conditional->negLogConstant(); - return scalar + factor->error(kEmpty) - negLogK; - } else if (!conditional && !factor) { + return result.scalar + result.factor->error(kEmpty) - result.negLogK; + } else if (!result.conditional && !result.factor) { // If the factor has been pruned, return infinite error return std::numeric_limits::infinity(); } else { @@ -323,13 +330,10 @@ static std::shared_ptr createHybridGaussianFactor( const ResultTree &eliminationResults, const DiscreteKeys &discreteSeparator) { // Correct for the normalization constant used up by the conditional - auto correct = [&](const ResultValuePair &pair) -> GaussianFactorValuePair { - const auto &[conditional, factor] = pair.first; - const double scalar = pair.second; - if (conditional && factor) { - const double negLogK = conditional->negLogConstant(); - return {factor, scalar - negLogK}; - } else if (!conditional && !factor) { + auto correct = [&](const Result &result) -> GaussianFactorValuePair { + if (result.conditional && result.factor) { + return {result.factor, result.scalar - result.negLogK}; + } else if (!result.conditional && !result.factor) { return {nullptr, std::numeric_limits::infinity()}; } else { throw std::runtime_error("createHybridGaussianFactors has mixed NULLs"); @@ -370,23 +374,23 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // This is the elimination method on the leaf nodes bool someContinuousLeft = false; - auto eliminate = [&](const std::pair &pair) - -> std::pair { + auto 
eliminate = + [&](const std::pair &pair) -> Result { const auto &[graph, scalar] = pair; if (graph.empty()) { - return {{nullptr, nullptr}, 0.0}; + return {nullptr, 0.0, nullptr, 0.0}; } // Expensive elimination of product factor. - auto result = + auto [conditional, factor] = EliminatePreferCholesky(graph, keys); /// <<<<<< MOST COMPUTE IS HERE // Record whether there any continuous variables left - someContinuousLeft |= !result.second->empty(); + someContinuousLeft |= !factor->empty(); // We pass on the scalar unmodified. - return {result, scalar}; + return {conditional, conditional->negLogConstant(), factor, scalar}; }; // Perform elimination! @@ -400,12 +404,13 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { ? createHybridGaussianFactor(eliminationResults, discreteSeparator) : createDiscreteFactor(eliminationResults, discreteSeparator); - // Create the HybridGaussianConditional from the conditionals - HybridGaussianConditional::Conditionals conditionals( - eliminationResults, - [](const ResultValuePair &pair) { return pair.first.first; }); - auto hybridGaussian = std::make_shared( - discreteSeparator, conditionals); + // Create the HybridGaussianConditional without re-calculating constants: + HybridGaussianConditional::FactorValuePairs pairs( + eliminationResults, [](const Result &result) -> GaussianFactorValuePair { + return {result.conditional, result.negLogK}; + }); + auto hybridGaussian = + std::make_shared(discreteSeparator, pairs); return {std::make_shared(hybridGaussian), newFactor}; } From 2c9665fae6497a342e6111b194453a7027cec686 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Mon, 21 Oct 2024 19:06:47 -0700 Subject: [PATCH 157/204] Prune graphs with nulls on the fly --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 00bf7955e..bc3856fe7 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -367,10 +367,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // any difference in noise models used. HybridGaussianProductFactor productFactor = collectProductFactor(); - // Convert factor graphs with a nullptr to an empty factor graph. - // This is done after assembly since it is non-trivial to keep track of which - // FG has a nullptr as we're looping over the factors. - auto prunedProductFactor = productFactor.removeEmpty(); + auto isNull = [](const GaussianFactor::shared_ptr& ptr) { return !ptr; }; // This is the elimination method on the leaf nodes bool someContinuousLeft = false; @@ -378,7 +375,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { [&](const std::pair &pair) -> Result { const auto &[graph, scalar] = pair; - if (graph.empty()) { + if (graph.empty() || std::any_of(graph.begin(), graph.end(), isNull)) { return {nullptr, 0.0, nullptr, 0.0}; } @@ -394,7 +391,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { }; // Perform elimination! - const ResultTree eliminationResults(prunedProductFactor, eliminate); + const ResultTree eliminationResults(productFactor, eliminate); // If there are no more continuous parents we create a DiscreteFactor with the // error for each discrete choice. 
Otherwise, create a HybridGaussianFactor From 25f90701c760599494fb51fd04096e447fbe9c51 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 22 Oct 2024 13:50:08 -0700 Subject: [PATCH 158/204] Change and simplify tests using {} --- gtsam/hybrid/HybridGaussianFactorGraph.h | 8 + gtsam/hybrid/tests/testHybridGaussianISAM.cpp | 147 ++++++------------ 2 files changed, 58 insertions(+), 97 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.h b/gtsam/hybrid/HybridGaussianFactorGraph.h index c503118eb..048fd2701 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.h +++ b/gtsam/hybrid/HybridGaussianFactorGraph.h @@ -132,6 +132,14 @@ class GTSAM_EXPORT HybridGaussianFactorGraph explicit HybridGaussianFactorGraph(const CONTAINER& factors) : Base(factors) {} + /** + * Construct from an initializer lists of GaussianFactor shared pointers. + * Example: + * HybridGaussianFactorGraph graph = { factor1, factor2, factor3 }; + */ + HybridGaussianFactorGraph(std::initializer_list factors) + : Base(factors) {} + /** * Implicit copy/downcast constructor to override explicit template container * constructor. In BayesTree this is used for: diff --git a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp index 70fa321ad..11e3194f2 100644 --- a/gtsam/hybrid/tests/testHybridGaussianISAM.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianISAM.cpp @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file testHybridIncremental.cpp + * @file testHybridGaussianISAM.cpp * @brief Unit tests for incremental inference * @author Fan Jiang, Varun Agrawal, Frank Dellaert * @date Jan 2021 @@ -27,8 +27,6 @@ #include #include -#include - #include "Switching.h" // Include for test suite @@ -36,77 +34,63 @@ using namespace std; using namespace gtsam; -using noiseModel::Isotropic; -using symbol_shorthand::L; using symbol_shorthand::M; using symbol_shorthand::W; using symbol_shorthand::X; using symbol_shorthand::Y; using symbol_shorthand::Z; +/* ****************************************************************************/ +namespace switching3 { +// ϕ(x0) ϕ(x0,x1,m0) ϕ(x1,x2,m1) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0) ϕ(m0,m1) +const Switching switching(3); +const HybridGaussianFactorGraph &lfg = switching.linearizedFactorGraph; + +// First update graph: ϕ(x0) ϕ(x0,x1,m0) ϕ(m0) +const HybridGaussianFactorGraph graph1{lfg.at(0), lfg.at(1), lfg.at(5)}; + +// Second update graph: ϕ(x1,x2,m1) ϕ(x1;z1) ϕ(x2;z2) ϕ(m0,m1) +const HybridGaussianFactorGraph graph2{lfg.at(2), lfg.at(3), lfg.at(4), + lfg.at(6)}; +} // namespace switching3 + /* ****************************************************************************/ // Test if we can perform elimination incrementally. 
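// (The tests below consume graph1 and graph2 from switching3 above. The
// slicing relies on the new initializer-list constructor, for example
//   const HybridGaussianFactorGraph firstUpdate{lfg.at(0), lfg.at(1), lfg.at(5)};
// with "firstUpdate" an illustrative name; it copies shared pointers out of
// the full linearized graph.)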
TEST(HybridGaussianElimination, IncrementalElimination) {
-  Switching switching(3);
+  using namespace switching3;
   HybridGaussianISAM isam;
-  HybridGaussianFactorGraph graph1;
-  // Create initial factor graph
-  // * * *
-  // | | |
-  // X0 -*- X1 -*- X2
-  //  \*-M0-*/
-  graph1.push_back(switching.linearizedFactorGraph.at(0));  // P(X0)
-  graph1.push_back(switching.linearizedFactorGraph.at(1));  // P(X0, X1 | M0)
-  graph1.push_back(switching.linearizedFactorGraph.at(2));  // P(X1, X2 | M1)
-  graph1.push_back(switching.linearizedFactorGraph.at(5));  // P(M0)
-
-  // Run update step
+  // Run first update step
   isam.update(graph1);
 
   // Check that after update we have 2 hybrid Bayes net nodes:
-  // P(X0 | X1, M0) and P(X1, X2 | M0, M1), P(M0, M1)
-  EXPECT_LONGS_EQUAL(3, isam.size());
-  EXPECT(isam[X(0)]->conditional()->frontals() == KeyVector{X(0)});
-  EXPECT(isam[X(0)]->conditional()->parents() == KeyVector({X(1), M(0)}));
-  EXPECT(isam[X(1)]->conditional()->frontals() == KeyVector({X(1), X(2)}));
-  EXPECT(isam[X(1)]->conditional()->parents() == KeyVector({M(0), M(1)}));
+  // P(M0) and P(X0, X1 | M0)
+  EXPECT_LONGS_EQUAL(2, isam.size());
+  EXPECT(isam[M(0)]->conditional()->frontals() == KeyVector({M(0)}));
+  EXPECT(isam[M(0)]->conditional()->parents() == KeyVector());
+  EXPECT(isam[X(0)]->conditional()->frontals() == KeyVector({X(0), X(1)}));
+  EXPECT(isam[X(0)]->conditional()->parents() == KeyVector({M(0)}));
 
   /********************************************************/
-  // New factor graph for incremental update.
-  HybridGaussianFactorGraph graph2;
-
-  graph1.push_back(switching.linearizedFactorGraph.at(3));  // P(X1)
-  graph2.push_back(switching.linearizedFactorGraph.at(4));  // P(X2)
-  graph2.push_back(switching.linearizedFactorGraph.at(6));  // P(M0, M1)
-
+  // Run second update step
   isam.update(graph2);
 
-  // Check that after the second update we have
-  // 1 additional hybrid Bayes net node:
-  // P(X1, X2 | M0, M1)
+  // Check that after update we have 3 hybrid Bayes net nodes:
+  // P(M0, M1), P(X1, X2 | M0, M1) and P(X0 | X1, M0)
   EXPECT_LONGS_EQUAL(3, isam.size());
-  EXPECT(isam[X(2)]->conditional()->frontals() == KeyVector({X(1), X(2)}));
-  EXPECT(isam[X(2)]->conditional()->parents() == KeyVector({M(0), M(1)}));
+  EXPECT(isam[M(0)]->conditional()->frontals() == KeyVector({M(0), M(1)}));
+  EXPECT(isam[M(0)]->conditional()->parents() == KeyVector());
+  EXPECT(isam[X(1)]->conditional()->frontals() == KeyVector({X(1), X(2)}));
+  EXPECT(isam[X(1)]->conditional()->parents() == KeyVector({M(0), M(1)}));
+  EXPECT(isam[X(0)]->conditional()->frontals() == KeyVector{X(0)});
+  EXPECT(isam[X(0)]->conditional()->parents() == KeyVector({X(1), M(0)}));
 }
 
 /* ****************************************************************************/
 // Test if we can incrementally do the inference
 TEST(HybridGaussianElimination, IncrementalInference) {
-  Switching switching(3);
+  using namespace switching3;
   HybridGaussianISAM isam;
-  HybridGaussianFactorGraph graph1;
-
-  // Create initial factor graph
-  // * * *
-  // | | |
-  // X0 -*- X1 -*- X2
-  //  |   |
-  //  *-M0 - * - M1
-  graph1.push_back(switching.linearizedFactorGraph.at(0));  // P(X0)
-  graph1.push_back(switching.linearizedFactorGraph.at(1));  // P(X0, X1 | M0)
-  graph1.push_back(switching.linearizedFactorGraph.at(3));  // P(X1)
-  graph1.push_back(switching.linearizedFactorGraph.at(5));  // P(M0)
 
   // Run update step
   isam.update(graph1);
@@ -115,13 +99,7 @@ TEST(HybridGaussianElimination, IncrementalInference) {
   EXPECT(discreteConditional_m0->keys() == KeyVector({M(0)}));
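   // (Per the switching3 namespace above, graph2 supplies ϕ(x1,x2,m1),
   // ϕ(x1;z1), ϕ(x2;z2) and ϕ(m0,m1) for the second update below.)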
/********************************************************/ - // New factor graph for incremental update. - HybridGaussianFactorGraph graph2; - - graph2.push_back(switching.linearizedFactorGraph.at(2)); // P(X1, X2 | M1) - graph2.push_back(switching.linearizedFactorGraph.at(4)); // P(X2) - graph2.push_back(switching.linearizedFactorGraph.at(6)); // P(M0, M1) - + // Second incremental update. isam.update(graph2); /********************************************************/ @@ -160,44 +138,19 @@ TEST(HybridGaussianElimination, IncrementalInference) { // The other discrete probabilities on M(2) are calculated the same way const Ordering discreteOrdering{M(0), M(1)}; HybridBayesTree::shared_ptr discreteBayesTree = - expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal( - discreteOrdering); - - DiscreteValues m00; - m00[M(0)] = 0, m00[M(1)] = 0; - DiscreteConditional decisionTree = - *(*discreteBayesTree)[M(1)]->conditional()->asDiscrete(); - double m00_prob = decisionTree(m00); - - auto discreteConditional = isam[M(1)]->conditional()->asDiscrete(); + expectedRemainingGraph->eliminateMultifrontal(discreteOrdering); // Test the probability values with regression tests. - DiscreteValues assignment; - EXPECT(assert_equal(0.0952922, m00_prob, 1e-5)); - assignment[M(0)] = 0; - assignment[M(1)] = 0; - EXPECT(assert_equal(0.0952922, (*discreteConditional)(assignment), 1e-5)); - assignment[M(0)] = 1; - assignment[M(1)] = 0; - EXPECT(assert_equal(0.282758, (*discreteConditional)(assignment), 1e-5)); - assignment[M(0)] = 0; - assignment[M(1)] = 1; - EXPECT(assert_equal(0.314175, (*discreteConditional)(assignment), 1e-5)); - assignment[M(0)] = 1; - assignment[M(1)] = 1; - EXPECT(assert_equal(0.307775, (*discreteConditional)(assignment), 1e-5)); + auto discrete = isam[M(1)]->conditional()->asDiscrete(); + EXPECT(assert_equal(0.095292, (*discrete)({{M(0), 0}, {M(1), 0}}), 1e-5)); + EXPECT(assert_equal(0.282758, (*discrete)({{M(0), 1}, {M(1), 0}}), 1e-5)); + EXPECT(assert_equal(0.314175, (*discrete)({{M(0), 0}, {M(1), 1}}), 1e-5)); + EXPECT(assert_equal(0.307775, (*discrete)({{M(0), 1}, {M(1), 1}}), 1e-5)); - // Check if the clique conditional generated from incremental elimination + // Check that the clique conditional generated from incremental elimination // matches that of batch elimination. 
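   // (Sanity note: the four regression values above sum to one, as expected
   // for a normalized posterior over the two binary modes.)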
- auto expectedChordal = - expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal(); - auto actualConditional = dynamic_pointer_cast( - isam[M(1)]->conditional()->inner()); - // Account for the probability terms from evaluating continuous FGs - DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}}; - vector probs = {0.095292197, 0.31417524, 0.28275772, 0.30777485}; - auto expectedConditional = - std::make_shared(discrete_keys, probs); + auto expectedConditional = (*discreteBayesTree)[M(1)]->conditional(); + auto actualConditional = isam[M(1)]->conditional(); EXPECT(assert_equal(*expectedConditional, *actualConditional, 1e-6)); } @@ -227,7 +180,7 @@ TEST(HybridGaussianElimination, Approx_inference) { } // Now we calculate the actual factors using full elimination - const auto [unprunedHybridBayesTree, unprunedRemainingGraph] = + const auto [unPrunedHybridBayesTree, unPrunedRemainingGraph] = switching.linearizedFactorGraph.eliminatePartialMultifrontal(ordering); size_t maxNrLeaves = 5; @@ -236,7 +189,7 @@ TEST(HybridGaussianElimination, Approx_inference) { incrementalHybrid.prune(maxNrLeaves); /* - unpruned factor is: + unPruned factor is: Choice(m3) 0 Choice(m2) 0 0 Choice(m1) @@ -282,8 +235,8 @@ TEST(HybridGaussianElimination, Approx_inference) { // Check that the hybrid nodes of the bayes net match those of the pre-pruning // bayes net, at the same positions. - auto &unprunedLastDensity = *dynamic_pointer_cast( - unprunedHybridBayesTree->clique(X(3))->conditional()->inner()); + auto &unPrunedLastDensity = *dynamic_pointer_cast( + unPrunedHybridBayesTree->clique(X(3))->conditional()->inner()); auto &lastDensity = *dynamic_pointer_cast( incrementalHybrid[X(3)]->conditional()->inner()); @@ -298,7 +251,7 @@ TEST(HybridGaussianElimination, Approx_inference) { EXPECT(lastDensity(assignment) == nullptr); } else { CHECK(lastDensity(assignment)); - EXPECT(assert_equal(*unprunedLastDensity(assignment), + EXPECT(assert_equal(*unPrunedLastDensity(assignment), *lastDensity(assignment))); } } @@ -306,7 +259,7 @@ TEST(HybridGaussianElimination, Approx_inference) { /* ****************************************************************************/ // Test approximate inference with an additional pruning step. -TEST(HybridGaussianElimination, Incremental_approximate) { +TEST(HybridGaussianElimination, IncrementalApproximate) { Switching switching(5); HybridGaussianISAM incrementalHybrid; HybridGaussianFactorGraph graph1; @@ -330,7 +283,7 @@ TEST(HybridGaussianElimination, Incremental_approximate) { incrementalHybrid.prune(maxComponents); // Check if we have a bayes tree with 4 hybrid nodes, - // each with 2, 4, 8, and 5 (pruned) leaves respetively. + // each with 2, 4, 8, and 5 (pruned) leaves respectively. 
EXPECT_LONGS_EQUAL(4, incrementalHybrid.size()); EXPECT_LONGS_EQUAL( 2, incrementalHybrid[X(0)]->conditional()->asHybrid()->nrComponents()); From 5145b8c47a39a1ad41596c3a5469d6a884b21010 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 22 Oct 2024 13:50:39 -0700 Subject: [PATCH 159/204] formatting/spelling only --- gtsam/hybrid/HybridGaussianISAM.cpp | 2 +- gtsam/hybrid/HybridNonlinearISAM.cpp | 8 ++++---- gtsam/hybrid/HybridNonlinearISAM.h | 10 +++++----- gtsam/hybrid/tests/testHybridEstimation.cpp | 16 ++++------------ 4 files changed, 14 insertions(+), 22 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianISAM.cpp b/gtsam/hybrid/HybridGaussianISAM.cpp index 6f8b7b9ff..28116df45 100644 --- a/gtsam/hybrid/HybridGaussianISAM.cpp +++ b/gtsam/hybrid/HybridGaussianISAM.cpp @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file HybridGaussianISAM.h + * @file HybridGaussianISAM.cpp * @date March 31, 2022 * @author Fan Jiang * @author Frank Dellaert diff --git a/gtsam/hybrid/HybridNonlinearISAM.cpp b/gtsam/hybrid/HybridNonlinearISAM.cpp index ec6ebf4d0..503afaa72 100644 --- a/gtsam/hybrid/HybridNonlinearISAM.cpp +++ b/gtsam/hybrid/HybridNonlinearISAM.cpp @@ -39,8 +39,8 @@ void HybridNonlinearISAM::update(const HybridNonlinearFactorGraph& newFactors, if (newFactors.size() > 0) { // Reorder and relinearize every reorderInterval updates if (reorderInterval_ > 0 && ++reorderCounter_ >= reorderInterval_) { - // TODO(Varun) Relinearization doesn't take into account pruning - reorder_relinearize(); + // TODO(Varun) Re-linearization doesn't take into account pruning + reorderRelinearize(); reorderCounter_ = 0; } @@ -60,7 +60,7 @@ void HybridNonlinearISAM::update(const HybridNonlinearFactorGraph& newFactors, } /* ************************************************************************* */ -void HybridNonlinearISAM::reorder_relinearize() { +void HybridNonlinearISAM::reorderRelinearize() { if (factors_.size() > 0) { // Obtain the new linearization point const Values newLinPoint = estimate(); @@ -69,7 +69,7 @@ void HybridNonlinearISAM::reorder_relinearize() { // Just recreate the whole BayesTree // TODO: allow for constrained ordering here - // TODO: decouple relinearization and reordering to avoid + // TODO: decouple re-linearization and reordering to avoid isam_.update(*factors_.linearize(newLinPoint), {}, {}, eliminationFunction_); diff --git a/gtsam/hybrid/HybridNonlinearISAM.h b/gtsam/hybrid/HybridNonlinearISAM.h index e41b4ebe1..686a08327 100644 --- a/gtsam/hybrid/HybridNonlinearISAM.h +++ b/gtsam/hybrid/HybridNonlinearISAM.h @@ -37,7 +37,7 @@ class GTSAM_EXPORT HybridNonlinearISAM { /// The discrete assignment DiscreteValues assignment_; - /** The original factors, used when relinearizing */ + /** The original factors, used when re-linearizing */ HybridNonlinearFactorGraph factors_; /** The reordering interval and counter */ @@ -52,8 +52,8 @@ class GTSAM_EXPORT HybridNonlinearISAM { /// @{ /** - * Periodically reorder and relinearize - * @param reorderInterval is the number of updates between reorderings, + * Periodically reorder and re-linearize + * @param reorderInterval is the number of updates between re-orderings, * 0 never reorders (and is dangerous for memory consumption) * 1 (default) reorders every time, in worse case is batch every update * typical values are 50 or 100 @@ -124,8 +124,8 @@ class GTSAM_EXPORT HybridNonlinearISAM { const std::optional& maxNrLeaves = {}, const std::optional& ordering = {}); - /** Relinearization 
and reordering of variables */ - void reorder_relinearize(); + /** Re-linearization and reordering of variables */ + void reorderRelinearize(); /// @} }; diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index 6206e5f45..f972e7cac 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -339,12 +339,8 @@ TEST(HybridEstimation, Probability) { HybridValues hybrid_values = bayesNet->optimize(); // This is the correct sequence as designed - DiscreteValues discrete_seq; - discrete_seq[M(0)] = 1; - discrete_seq[M(1)] = 1; - discrete_seq[M(2)] = 0; - - EXPECT(assert_equal(discrete_seq, hybrid_values.discrete())); + DiscreteValues expectedSequence{{M(0), 1}, {M(1), 1}, {M(2), 0}}; + EXPECT(assert_equal(expectedSequence, hybrid_values.discrete())); } /****************************************************************************/ @@ -411,12 +407,8 @@ TEST(HybridEstimation, ProbabilityMultifrontal) { HybridValues hybrid_values = discreteBayesTree->optimize(); // This is the correct sequence as designed - DiscreteValues discrete_seq; - discrete_seq[M(0)] = 1; - discrete_seq[M(1)] = 1; - discrete_seq[M(2)] = 0; - - EXPECT(assert_equal(discrete_seq, hybrid_values.discrete())); + DiscreteValues expectedSequence{{M(0), 1}, {M(1), 1}, {M(2), 0}}; + EXPECT(assert_equal(expectedSequence, hybrid_values.discrete())); } /********************************************************************************* From 777f0d25ad8f2d09017fcb0d8d0bb939476c2d0b Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Tue, 22 Oct 2024 15:39:14 -0700 Subject: [PATCH 160/204] Restrict dangerous flat access --- gtsam/hybrid/tests/Switching.h | 40 ++++-- gtsam/hybrid/tests/testHybridEstimation.cpp | 37 +++--- .../tests/testHybridNonlinearFactorGraph.cpp | 15 +-- .../hybrid/tests/testHybridNonlinearISAM.cpp | 114 +++++++----------- 4 files changed, 102 insertions(+), 104 deletions(-) diff --git a/gtsam/hybrid/tests/Switching.h b/gtsam/hybrid/tests/Switching.h index 64025397f..fdf9092b6 100644 --- a/gtsam/hybrid/tests/Switching.h +++ b/gtsam/hybrid/tests/Switching.h @@ -120,12 +120,21 @@ using MotionModel = BetweenFactor; // Test fixture with switching network. /// ϕ(X(0)) .. ϕ(X(k),X(k+1)) .. ϕ(X(k);z_k) .. ϕ(M(0)) .. ϕ(M(K-3),M(K-2)) struct Switching { + private: + HybridNonlinearFactorGraph nonlinearFactorGraph_; + + public: size_t K; DiscreteKeys modes; - HybridNonlinearFactorGraph nonlinearFactorGraph; + HybridNonlinearFactorGraph unaryFactors, binaryFactors, modeChain; HybridGaussianFactorGraph linearizedFactorGraph; Values linearizationPoint; + // Access the flat nonlinear factor graph. + const HybridNonlinearFactorGraph &nonlinearFactorGraph() const { + return nonlinearFactorGraph_; + } + /** * @brief Create with given number of time steps. * @@ -136,7 +145,7 @@ struct Switching { */ Switching(size_t K, double between_sigma = 1.0, double prior_sigma = 0.1, std::vector measurements = {}, - std::string discrete_transition_prob = "1/2 3/2") + std::string transitionProbabilityTable = "1/2 3/2") : K(K) { using noiseModel::Isotropic; @@ -155,32 +164,36 @@ struct Switching { // Create hybrid factor graph. // Add a prior ϕ(X(0)) on X(0). - nonlinearFactorGraph.emplace_shared>( + nonlinearFactorGraph_.emplace_shared>( X(0), measurements.at(0), Isotropic::Sigma(1, prior_sigma)); + unaryFactors.push_back(nonlinearFactorGraph_.back()); // Add "motion models" ϕ(X(k),X(k+1),M(k)). 
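  // For reference, each hybrid "motion model" added in the loop below
  // switches between two BetweenFactor hypotheses, one per value of M(k).
  // A sketch of what motionModels(k, between_sigma) plausibly returns (its
  // body is elided from this diff, so the names here are illustrative):
  //   auto noise = noiseModel::Isotropic::Sigma(1, between_sigma);
  //   auto still = std::make_shared<MotionModel>(X(k), X(k + 1), 0.0, noise);
  //   auto moving = std::make_shared<MotionModel>(X(k), X(k + 1), 1.0, noise);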
for (size_t k = 0; k < K - 1; k++) { auto motion_models = motionModels(k, between_sigma); - nonlinearFactorGraph.emplace_shared(modes[k], + nonlinearFactorGraph_.emplace_shared(modes[k], motion_models); + binaryFactors.push_back(nonlinearFactorGraph_.back()); } // Add measurement factors ϕ(X(k);z_k). auto measurement_noise = Isotropic::Sigma(1, prior_sigma); for (size_t k = 1; k < K; k++) { - nonlinearFactorGraph.emplace_shared>( + nonlinearFactorGraph_.emplace_shared>( X(k), measurements.at(k), measurement_noise); + unaryFactors.push_back(nonlinearFactorGraph_.back()); } // Add "mode chain" ϕ(M(0)) ϕ(M(0),M(1)) ... ϕ(M(K-3),M(K-2)) - addModeChain(&nonlinearFactorGraph, discrete_transition_prob); + modeChain = createModeChain(transitionProbabilityTable); + nonlinearFactorGraph_ += modeChain; // Create the linearization point. for (size_t k = 0; k < K; k++) { linearizationPoint.insert(X(k), static_cast(k + 1)); } - linearizedFactorGraph = *nonlinearFactorGraph.linearize(linearizationPoint); + linearizedFactorGraph = *nonlinearFactorGraph_.linearize(linearizationPoint); } // Create motion models for a given time step @@ -200,15 +213,16 @@ struct Switching { * * @param fg The factor graph to which the mode chain is added. */ - template - void addModeChain(FACTORGRAPH *fg, - std::string discrete_transition_prob = "1/2 3/2") { - fg->template emplace_shared(modes[0], "1/1"); + HybridNonlinearFactorGraph createModeChain( + std::string transitionProbabilityTable = "1/2 3/2") { + HybridNonlinearFactorGraph chain; + chain.emplace_shared(modes[0], "1/1"); for (size_t k = 0; k < K - 2; k++) { auto parents = {modes[k]}; - fg->template emplace_shared( - modes[k + 1], parents, discrete_transition_prob); + chain.emplace_shared(modes[k + 1], parents, + transitionProbabilityTable); } + return chain; } }; diff --git a/gtsam/hybrid/tests/testHybridEstimation.cpp b/gtsam/hybrid/tests/testHybridEstimation.cpp index f972e7cac..9c2580a17 100644 --- a/gtsam/hybrid/tests/testHybridEstimation.cpp +++ b/gtsam/hybrid/tests/testHybridEstimation.cpp @@ -37,6 +37,8 @@ // Include for test suite #include +#include + #include "Switching.h" using namespace std; @@ -55,13 +57,16 @@ std::vector discrete_seq = {1, 1, 0, 0, 0, 1, 1, 1, 1, 0, Switching InitializeEstimationProblem( const size_t K, const double between_sigma, const double measurement_sigma, const std::vector& measurements, - const std::string& discrete_transition_prob, + const std::string& transitionProbabilityTable, HybridNonlinearFactorGraph& graph, Values& initial) { Switching switching(K, between_sigma, measurement_sigma, measurements, - discrete_transition_prob); + transitionProbabilityTable); + + // Add prior on M(0) + graph.push_back(switching.modeChain.at(0)); // Add the X(0) prior - graph.push_back(switching.nonlinearFactorGraph.at(0)); + graph.push_back(switching.unaryFactors.at(0)); initial.insert(X(0), switching.linearizationPoint.at(X(0))); return switching; @@ -128,10 +133,9 @@ TEST(HybridEstimation, IncrementalSmoother) { constexpr size_t maxNrLeaves = 3; for (size_t k = 1; k < K; k++) { - // Motion Model - graph.push_back(switching.nonlinearFactorGraph.at(k)); - // Measurement - graph.push_back(switching.nonlinearFactorGraph.at(k + K - 1)); + if (k > 1) graph.push_back(switching.modeChain.at(k - 1)); // Mode chain + graph.push_back(switching.binaryFactors.at(k - 1)); // Motion Model + graph.push_back(switching.unaryFactors.at(k)); // Measurement initial.insert(X(k), switching.linearizationPoint.at(X(k))); @@ -176,10 +180,9 @@ TEST(HybridEstimation, 
ValidPruningError) { constexpr size_t maxNrLeaves = 3; for (size_t k = 1; k < K; k++) { - // Motion Model - graph.push_back(switching.nonlinearFactorGraph.at(k)); - // Measurement - graph.push_back(switching.nonlinearFactorGraph.at(k + K - 1)); + if (k > 1) graph.push_back(switching.modeChain.at(k - 1)); // Mode chain + graph.push_back(switching.binaryFactors.at(k - 1)); // Motion Model + graph.push_back(switching.unaryFactors.at(k)); // Measurement initial.insert(X(k), switching.linearizationPoint.at(X(k))); @@ -225,15 +228,17 @@ TEST(HybridEstimation, ISAM) { HybridGaussianFactorGraph linearized; + const size_t maxNrLeaves = 3; for (size_t k = 1; k < K; k++) { - // Motion Model - graph.push_back(switching.nonlinearFactorGraph.at(k)); - // Measurement - graph.push_back(switching.nonlinearFactorGraph.at(k + K - 1)); + if (k > 1) graph.push_back(switching.modeChain.at(k - 1)); // Mode chain + graph.push_back(switching.binaryFactors.at(k - 1)); // Motion Model + graph.push_back(switching.unaryFactors.at(k)); // Measurement initial.insert(X(k), switching.linearizationPoint.at(X(k))); - isam.update(graph, initial, 3); + isam.update(graph, initial, maxNrLeaves); + // isam.saveGraph("NLiSAM" + std::to_string(k) + ".dot"); + // GTSAM_PRINT(isam); graph.resize(0); initial.clear(); diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index e77476e25..dd4128034 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -216,8 +216,8 @@ TEST(HybridNonlinearFactorGraph, PushBack) { TEST(HybridNonlinearFactorGraph, ErrorTree) { Switching s(3); - HybridNonlinearFactorGraph graph = s.nonlinearFactorGraph; - Values values = s.linearizationPoint; + const HybridNonlinearFactorGraph &graph = s.nonlinearFactorGraph(); + const Values &values = s.linearizationPoint; auto error_tree = graph.errorTree(s.linearizationPoint); @@ -248,7 +248,7 @@ TEST(HybridNonlinearFactorGraph, ErrorTree) { TEST(HybridNonlinearFactorGraph, Switching) { Switching self(3); - EXPECT_LONGS_EQUAL(7, self.nonlinearFactorGraph.size()); + EXPECT_LONGS_EQUAL(7, self.nonlinearFactorGraph().size()); EXPECT_LONGS_EQUAL(7, self.linearizedFactorGraph.size()); } @@ -260,7 +260,7 @@ TEST(HybridNonlinearFactorGraph, Linearization) { // Linearize here: HybridGaussianFactorGraph actualLinearized = - *self.nonlinearFactorGraph.linearize(self.linearizationPoint); + *self.nonlinearFactorGraph().linearize(self.linearizationPoint); EXPECT_LONGS_EQUAL(7, actualLinearized.size()); } @@ -409,7 +409,7 @@ TEST(HybridNonlinearFactorGraph, Partial_Elimination) { /* ****************************************************************************/ TEST(HybridNonlinearFactorGraph, Error) { Switching self(3); - HybridNonlinearFactorGraph fg = self.nonlinearFactorGraph; + HybridNonlinearFactorGraph fg = self.nonlinearFactorGraph(); { HybridValues values(VectorValues(), DiscreteValues{{M(0), 0}, {M(1), 0}}, @@ -441,8 +441,9 @@ TEST(HybridNonlinearFactorGraph, Error) { TEST(HybridNonlinearFactorGraph, PrintErrors) { Switching self(3); - // Get nonlinear factor graph and add linear factors to be holistic - HybridNonlinearFactorGraph fg = self.nonlinearFactorGraph; + // Get nonlinear factor graph and add linear factors to be holistic. + // TODO(Frank): ??? 
+ HybridNonlinearFactorGraph fg = self.nonlinearFactorGraph(); fg.add(self.linearizedFactorGraph); // Optimize to get HybridValues diff --git a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp index b804a176b..c5f52e3eb 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearISAM.cpp @@ -57,10 +57,10 @@ TEST(HybridNonlinearISAM, IncrementalElimination) { // | | | // X0 -*- X1 -*- X2 // \*-M0-*/ - graph1.push_back(switching.nonlinearFactorGraph.at(0)); // P(X0) - graph1.push_back(switching.nonlinearFactorGraph.at(1)); // P(X0, X1 | M0) - graph1.push_back(switching.nonlinearFactorGraph.at(2)); // P(X1, X2 | M1) - graph1.push_back(switching.nonlinearFactorGraph.at(5)); // P(M0) + graph1.push_back(switching.unaryFactors.at(0)); // P(X0) + graph1.push_back(switching.binaryFactors.at(0)); // P(X0, X1 | M0) + graph1.push_back(switching.binaryFactors.at(1)); // P(X1, X2 | M1) + graph1.push_back(switching.modeChain.at(0)); // P(M0) initial.insert(X(0), 1); initial.insert(X(1), 2); @@ -83,9 +83,9 @@ TEST(HybridNonlinearISAM, IncrementalElimination) { HybridNonlinearFactorGraph graph2; initial = Values(); - graph1.push_back(switching.nonlinearFactorGraph.at(3)); // P(X1) - graph2.push_back(switching.nonlinearFactorGraph.at(4)); // P(X2) - graph2.push_back(switching.nonlinearFactorGraph.at(6)); // P(M0, M1) + graph1.push_back(switching.unaryFactors.at(1)); // P(X1) + graph2.push_back(switching.unaryFactors.at(2)); // P(X2) + graph2.push_back(switching.modeChain.at(1)); // P(M0, M1) isam.update(graph2, initial); @@ -112,10 +112,10 @@ TEST(HybridNonlinearISAM, IncrementalInference) { // X0 -*- X1 -*- X2 // | | // *-M0 - * - M1 - graph1.push_back(switching.nonlinearFactorGraph.at(0)); // P(X0) - graph1.push_back(switching.nonlinearFactorGraph.at(1)); // P(X0, X1 | M0) - graph1.push_back(switching.nonlinearFactorGraph.at(3)); // P(X1) - graph1.push_back(switching.nonlinearFactorGraph.at(5)); // P(M0) + graph1.push_back(switching.unaryFactors.at(0)); // P(X0) + graph1.push_back(switching.binaryFactors.at(0)); // P(X0, X1 | M0) + graph1.push_back(switching.unaryFactors.at(1)); // P(X1) + graph1.push_back(switching.modeChain.at(0)); // P(M0) initial.insert(X(0), 1); initial.insert(X(1), 2); @@ -134,9 +134,9 @@ TEST(HybridNonlinearISAM, IncrementalInference) { initial.insert(X(2), 3); - graph2.push_back(switching.nonlinearFactorGraph.at(2)); // P(X1, X2 | M1) - graph2.push_back(switching.nonlinearFactorGraph.at(4)); // P(X2) - graph2.push_back(switching.nonlinearFactorGraph.at(6)); // P(M0, M1) + graph2.push_back(switching.binaryFactors.at(1)); // P(X1, X2 | M1) + graph2.push_back(switching.unaryFactors.at(2)); // P(X2) + graph2.push_back(switching.modeChain.at(1)); // P(M0, M1) isam.update(graph2, initial); bayesTree = isam.bayesTree(); @@ -175,46 +175,22 @@ TEST(HybridNonlinearISAM, IncrementalInference) { EXPECT(assert_equal(*x2_conditional, *expected_x2_conditional)); // We only perform manual continuous elimination for 0,0. 
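  // (That is, the continuous variables are eliminated with the modes fixed to
  // M(0)=0, M(1)=0, yielding the unnormalized weight of that one assignment;
  // repeating this for the other assignments gives the values checked below.)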
- // The other discrete probabilities on M(1) are calculated the same way + // The other discrete probabilities on M(2) are calculated the same way const Ordering discreteOrdering{M(0), M(1)}; HybridBayesTree::shared_ptr discreteBayesTree = - expectedRemainingGraph->BaseEliminateable::eliminateMultifrontal( - discreteOrdering); - - DiscreteValues m00; - m00[M(0)] = 0, m00[M(1)] = 0; - DiscreteConditional decisionTree = - *(*discreteBayesTree)[M(1)]->conditional()->asDiscrete(); - double m00_prob = decisionTree(m00); - - auto discreteConditional = bayesTree[M(1)]->conditional()->asDiscrete(); + expectedRemainingGraph->eliminateMultifrontal(discreteOrdering); // Test the probability values with regression tests. - DiscreteValues assignment; - EXPECT(assert_equal(0.0952922, m00_prob, 1e-5)); - assignment[M(0)] = 0; - assignment[M(1)] = 0; - EXPECT(assert_equal(0.0952922, (*discreteConditional)(assignment), 1e-5)); - assignment[M(0)] = 1; - assignment[M(1)] = 0; - EXPECT(assert_equal(0.282758, (*discreteConditional)(assignment), 1e-5)); - assignment[M(0)] = 0; - assignment[M(1)] = 1; - EXPECT(assert_equal(0.314175, (*discreteConditional)(assignment), 1e-5)); - assignment[M(0)] = 1; - assignment[M(1)] = 1; - EXPECT(assert_equal(0.307775, (*discreteConditional)(assignment), 1e-5)); + auto discrete = bayesTree[M(1)]->conditional()->asDiscrete(); + EXPECT(assert_equal(0.095292, (*discrete)({{M(0), 0}, {M(1), 0}}), 1e-5)); + EXPECT(assert_equal(0.282758, (*discrete)({{M(0), 1}, {M(1), 0}}), 1e-5)); + EXPECT(assert_equal(0.314175, (*discrete)({{M(0), 0}, {M(1), 1}}), 1e-5)); + EXPECT(assert_equal(0.307775, (*discrete)({{M(0), 1}, {M(1), 1}}), 1e-5)); - // Check if the clique conditional generated from incremental elimination + // Check that the clique conditional generated from incremental elimination // matches that of batch elimination. - auto expectedChordal = expectedRemainingGraph->eliminateMultifrontal(); - auto actualConditional = dynamic_pointer_cast( - bayesTree[M(1)]->conditional()->inner()); - // Account for the probability terms from evaluating continuous FGs - DiscreteKeys discrete_keys = {{M(0), 2}, {M(1), 2}}; - vector probs = {0.095292197, 0.31417524, 0.28275772, 0.30777485}; - auto expectedConditional = - std::make_shared(discrete_keys, probs); + auto expectedConditional = (*discreteBayesTree)[M(1)]->conditional(); + auto actualConditional = bayesTree[M(1)]->conditional(); EXPECT(assert_equal(*expectedConditional, *actualConditional, 1e-6)); } @@ -227,18 +203,19 @@ TEST(HybridNonlinearISAM, Approx_inference) { Values initial; // Add the 3 hybrid factors, x0-x1, x1-x2, x2-x3 - for (size_t i = 1; i < 4; i++) { - graph1.push_back(switching.nonlinearFactorGraph.at(i)); + for (size_t i = 0; i < 3; i++) { + graph1.push_back(switching.binaryFactors.at(i)); } // Add the Gaussian factors, 1 prior on X(0), // 3 measurements on X(1), X(2), X(3) - graph1.push_back(switching.nonlinearFactorGraph.at(0)); - for (size_t i = 4; i <= 7; i++) { - graph1.push_back(switching.nonlinearFactorGraph.at(i)); - initial.insert(X(i - 4), i - 3); + for (size_t i = 0; i < 4; i++) { + graph1.push_back(switching.unaryFactors.at(i)); + initial.insert(X(i), i + 1); } + // TODO(Frank): no mode chain? + // Create ordering. 
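  // (Only the continuous keys X(0)..X(3) go into the ordering, so the partial
  // elimination below leaves a remaining graph purely over the discrete modes.)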
Ordering ordering; for (size_t j = 0; j < 4; j++) { @@ -246,7 +223,7 @@ TEST(HybridNonlinearISAM, Approx_inference) { } // Now we calculate the actual factors using full elimination - const auto [unprunedHybridBayesTree, unprunedRemainingGraph] = + const auto [unPrunedHybridBayesTree, unPrunedRemainingGraph] = switching.linearizedFactorGraph .BaseEliminateable::eliminatePartialMultifrontal(ordering); @@ -257,7 +234,7 @@ TEST(HybridNonlinearISAM, Approx_inference) { bayesTree.prune(maxNrLeaves); /* - unpruned factor is: + unPruned factor is: Choice(m3) 0 Choice(m2) 0 0 Choice(m1) @@ -303,8 +280,8 @@ TEST(HybridNonlinearISAM, Approx_inference) { // Check that the hybrid nodes of the bayes net match those of the pre-pruning // bayes net, at the same positions. - auto &unprunedLastDensity = *dynamic_pointer_cast( - unprunedHybridBayesTree->clique(X(3))->conditional()->inner()); + auto &unPrunedLastDensity = *dynamic_pointer_cast( + unPrunedHybridBayesTree->clique(X(3))->conditional()->inner()); auto &lastDensity = *dynamic_pointer_cast( bayesTree[X(3)]->conditional()->inner()); @@ -319,7 +296,7 @@ TEST(HybridNonlinearISAM, Approx_inference) { EXPECT(lastDensity(assignment) == nullptr); } else { CHECK(lastDensity(assignment)); - EXPECT(assert_equal(*unprunedLastDensity(assignment), + EXPECT(assert_equal(*unPrunedLastDensity(assignment), *lastDensity(assignment))); } } @@ -335,19 +312,20 @@ TEST(HybridNonlinearISAM, Incremental_approximate) { /***** Run Round 1 *****/ // Add the 3 hybrid factors, x0-x1, x1-x2, x2-x3 - for (size_t i = 1; i < 4; i++) { - graph1.push_back(switching.nonlinearFactorGraph.at(i)); + for (size_t i = 0; i < 3; i++) { + graph1.push_back(switching.binaryFactors.at(i)); } // Add the Gaussian factors, 1 prior on X(0), // 3 measurements on X(1), X(2), X(3) - graph1.push_back(switching.nonlinearFactorGraph.at(0)); - initial.insert(X(0), 1); - for (size_t i = 5; i <= 7; i++) { - graph1.push_back(switching.nonlinearFactorGraph.at(i)); - initial.insert(X(i - 4), i - 3); + for (size_t i = 0; i < 4; i++) { + graph1.push_back(switching.unaryFactors.at(i)); + initial.insert(X(i), i + 1); } + // TODO(Frank): no mode chain? + + // Run update with pruning size_t maxComponents = 5; incrementalHybrid.update(graph1, initial); @@ -368,8 +346,8 @@ TEST(HybridNonlinearISAM, Incremental_approximate) { /***** Run Round 2 *****/ HybridGaussianFactorGraph graph2; - graph2.push_back(switching.nonlinearFactorGraph.at(4)); // x3-x4 - graph2.push_back(switching.nonlinearFactorGraph.at(8)); // x4 measurement + graph2.push_back(switching.binaryFactors.at(3)); // x3-x4 + graph2.push_back(switching.unaryFactors.at(4)); // x4 measurement initial = Values(); initial.insert(X(4), 5); From 977ac0d7626be864c906248bdf8e72d0e2a4bd9e Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 09:24:47 -0700 Subject: [PATCH 161/204] Address review comments --- gtsam/hybrid/HybridGaussianConditional.cpp | 7 ++----- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 4 +++- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index a890b37b7..c60ab47aa 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -275,9 +275,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( if (auto conditional = std::dynamic_pointer_cast(pair.first)) { const auto likelihood_m = conditional->likelihood(given); - // scalar is already correct. 
-      assert(pair.second ==
-             conditional->negLogConstant() - negLogConstant_);
+      // pair.second == conditional->negLogConstant() - negLogConstant_
       return {likelihood_m, pair.second};
     } else {
       return {nullptr, std::numeric_limits<double>::infinity()};
     }
@@ -320,8 +318,7 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune(
   };
   FactorValuePairs prunedConditionals = factors().apply(pruner);
-  return std::shared_ptr<HybridGaussianConditional>(
-      new HybridGaussianConditional(discreteKeys(), prunedConditionals));
+  return std::make_shared<HybridGaussianConditional>(discreteKeys(), prunedConditionals);
 }
 
 /* *******************************************************************************/
diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
index bc3856fe7..ecc317ab3 100644
--- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp
+++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp
@@ -367,7 +367,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const {
   // any difference in noise models used.
   HybridGaussianProductFactor productFactor = collectProductFactor();
 
-  auto isNull = [](const GaussianFactor::shared_ptr& ptr) { return !ptr; };
+  auto isNull = [](const GaussianFactor::shared_ptr &ptr) { return !ptr; };
 
   // This is the elimination method on the leaf nodes
   bool someContinuousLeft = false;
@@ -375,6 +375,8 @@
       [&](const std::pair<GaussianFactorGraph, double> &pair) -> Result {
         const auto &[graph, scalar] = pair;
 
+    // If any product contains a pruned factor, prune it here. Done here as it's
+    // non-trivial to do within collectProductFactor.
     if (graph.empty() || std::any_of(graph.begin(), graph.end(), isNull)) {
       return {nullptr, 0.0, nullptr, 0.0};
     }
 
From 4c74ec113a9162fda7ab0e7f360d2a519b4077d2 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Wed, 23 Oct 2024 11:37:55 -0400
Subject: [PATCH 162/204] formatting and comments

---
 gtsam/hybrid/HybridGaussianConditional.cpp | 24 ++++++++++++----------
 gtsam/hybrid/HybridGaussianFactorGraph.cpp |  4 ++--
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp
index c60ab47aa..3826ef899 100644
--- a/gtsam/hybrid/HybridGaussianConditional.cpp
+++ b/gtsam/hybrid/HybridGaussianConditional.cpp
@@ -25,12 +25,12 @@
 #include
 #include
 #include
+#include <gtsam/linear/GaussianConditional.h>
 #include
 #include
 #include
 #include
 
-#include "gtsam/linear/GaussianConditional.h"
 namespace gtsam {
 
 /* *******************************************************************************/
@@ -162,7 +162,7 @@ HybridGaussianConditional::HybridGaussianConditional(
 /* *******************************************************************************/
 const HybridGaussianConditional::Conditionals
 HybridGaussianConditional::conditionals() const {
-  return Conditionals(factors(), [](const auto& pair) {
+  return Conditionals(factors(), [](auto &&pair) {
     return std::dynamic_pointer_cast<GaussianConditional>(pair.first);
   });
 }
 
 /* *******************************************************************************/
 size_t HybridGaussianConditional::nrComponents() const {
   size_t total = 0;
-  factors().visit([&total](const auto& node) {
+  factors().visit([&total](auto &&node) {
     if (node.first) total += 1;
   });
   return total;
@@ -178,8 +178,8 @@
 /* *******************************************************************************/
 GaussianConditional::shared_ptr HybridGaussianConditional::choose(
-
const DiscreteValues& discreteValues) const { - auto& [ptr, _] = factors()(discreteValues); + const DiscreteValues &discreteValues) const { + auto &[ptr, _] = factors()(discreteValues); if (!ptr) return nullptr; auto conditional = std::dynamic_pointer_cast(ptr); if (conditional) @@ -196,9 +196,10 @@ bool HybridGaussianConditional::equals(const HybridFactor &lf, if (e == nullptr) return false; // Factors existence and scalar values are checked in BaseFactor::equals. - // Here we check additionally that the factors *are* conditionals and are equal. - auto compareFunc = [tol](const GaussianFactorValuePair& pair1, - const GaussianFactorValuePair& pair2) { + // Here we check additionally that the factors *are* conditionals + // and are equal. + auto compareFunc = [tol](const GaussianFactorValuePair &pair1, + const GaussianFactorValuePair &pair2) { auto c1 = std::dynamic_pointer_cast(pair1.first), c2 = std::dynamic_pointer_cast(pair2.first); return (!c1 && !c2) || (c1 && c2 && c1->equals(*c2, tol)); @@ -222,7 +223,8 @@ void HybridGaussianConditional::print(const std::string &s, "", [&](Key k) { return formatter(k); }, [&](const GaussianFactorValuePair &pair) -> std::string { RedirectCout rd; - if (auto gf = std::dynamic_pointer_cast(pair.first)) { + if (auto gf = + std::dynamic_pointer_cast(pair.first)) { gf->print("", formatter); return rd.str(); } else { @@ -323,7 +325,7 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( /* *******************************************************************************/ double HybridGaussianConditional::logProbability( - const HybridValues& values) const { + const HybridValues &values) const { auto [factor, _] = factors()(values.discrete()); if (auto conditional = std::dynamic_pointer_cast(factor)) return conditional->logProbability(values.continuous()); @@ -333,7 +335,7 @@ double HybridGaussianConditional::logProbability( } /* *******************************************************************************/ -double HybridGaussianConditional::evaluate(const HybridValues& values) const { +double HybridGaussianConditional::evaluate(const HybridValues &values) const { auto [factor, _] = factors()(values.discrete()); if (auto conditional = std::dynamic_pointer_cast(factor)) return conditional->evaluate(values.continuous()); diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index ecc317ab3..ceabe0871 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -48,8 +49,6 @@ #include #include -#include "gtsam/discrete/DecisionTreeFactor.h" - namespace gtsam { /// Specialize EliminateableFactorGraph for HybridGaussianFactorGraph: @@ -367,6 +366,7 @@ HybridGaussianFactorGraph::eliminate(const Ordering &keys) const { // any difference in noise models used. 
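  // (Each leaf of the product factor below pairs the GaussianFactorGraph for
  // one discrete assignment with a scalar accumulating that assignment's
  // collected constants, as the structured binding in the lambda shows.)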
HybridGaussianProductFactor productFactor = collectProductFactor(); + // Check if a factor is null auto isNull = [](const GaussianFactor::shared_ptr &ptr) { return !ptr; }; // This is the elimination method on the leaf nodes From cbb0a301737178b9f4f9ab8b32e11564b19d13bc Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Wed, 23 Oct 2024 12:45:38 -0400 Subject: [PATCH 163/204] helper method to reduce code duplication --- gtsam/hybrid/HybridGaussianConditional.cpp | 48 +++++++++++----------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 3826ef899..ac03bd3a3 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -33,6 +33,19 @@ #include namespace gtsam { + +/* *******************************************************************************/ +GaussianConditional::shared_ptr checkConditional( + const GaussianFactor::shared_ptr &factor) { + if (auto conditional = + std::dynamic_pointer_cast(factor)) { + return conditional; + } else { + throw std::logic_error( + "A HybridGaussianConditional unexpectedly contained a non-conditional"); + } +} + /* *******************************************************************************/ /** * @brief Helper struct for constructing HybridGaussianConditional objects @@ -92,10 +105,7 @@ struct HybridGaussianConditional::Helper { explicit Helper(const FactorValuePairs &pairs) : pairs(pairs) { auto func = [this](const GaussianFactorValuePair &pair) { if (!pair.first) return; - auto gc = std::dynamic_pointer_cast(pair.first); - if (!gc) - throw std::runtime_error( - "HybridGaussianConditional called with non-conditional."); + auto gc = checkConditional(pair.first); if (!nrFrontals) nrFrontals = gc->nrFrontals(); minNegLogConstant = std::min(minNegLogConstant, pair.second); }; @@ -179,14 +189,11 @@ size_t HybridGaussianConditional::nrComponents() const { /* *******************************************************************************/ GaussianConditional::shared_ptr HybridGaussianConditional::choose( const DiscreteValues &discreteValues) const { - auto &[ptr, _] = factors()(discreteValues); - if (!ptr) return nullptr; - auto conditional = std::dynamic_pointer_cast(ptr); - if (conditional) - return conditional; - else - throw std::logic_error( - "A HybridGaussianConditional unexpectedly contained a non-conditional"); + auto &[factor, _] = factors()(discreteValues); + if (!factor) return nullptr; + + auto conditional = checkConditional(factor); + return conditional; } /* *******************************************************************************/ @@ -320,28 +327,23 @@ HybridGaussianConditional::shared_ptr HybridGaussianConditional::prune( }; FactorValuePairs prunedConditionals = factors().apply(pruner); - return std::make_shared(discreteKeys(), prunedConditionals); + return std::make_shared(discreteKeys(), + prunedConditionals); } /* *******************************************************************************/ double HybridGaussianConditional::logProbability( const HybridValues &values) const { auto [factor, _] = factors()(values.discrete()); - if (auto conditional = std::dynamic_pointer_cast(factor)) - return conditional->logProbability(values.continuous()); - else - throw std::logic_error( - "A HybridGaussianConditional unexpectedly contained a non-conditional"); + auto conditional = checkConditional(factor); + return conditional->logProbability(values.continuous()); } /* 
*******************************************************************************/ double HybridGaussianConditional::evaluate(const HybridValues &values) const { auto [factor, _] = factors()(values.discrete()); - if (auto conditional = std::dynamic_pointer_cast(factor)) - return conditional->evaluate(values.continuous()); - else - throw std::logic_error( - "A HybridGaussianConditional unexpectedly contained a non-conditional"); + auto conditional = checkConditional(factor); + return conditional->evaluate(values.continuous()); } } // namespace gtsam From ff9a39911727567e5dbb0f26d8ac28745a101f06 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 15:25:05 -0700 Subject: [PATCH 164/204] Spelling --- gtsam/base/Manifold.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gtsam/base/Manifold.h b/gtsam/base/Manifold.h index 815c8b288..cb30fa9c0 100644 --- a/gtsam/base/Manifold.h +++ b/gtsam/base/Manifold.h @@ -162,7 +162,7 @@ struct FixedDimension { typedef const int value_type; static const int value = traits::dimension; static_assert(value != Eigen::Dynamic, - "FixedDimension instantiated for dymanically-sized type."); + "FixedDimension instantiated for dynamically-sized type."); }; } // \ namespace gtsam From f6ed30d49882b1e2aec14433af44f290938e86bf Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 15:25:20 -0700 Subject: [PATCH 165/204] Initial check-in --- gtsam/geometry/FundamentalMatrix.h | 83 +++++++++++++++++++ .../geometry/tests/testFundamentalMatrix.cpp | 56 +++++++++++++ 2 files changed, 139 insertions(+) create mode 100644 gtsam/geometry/FundamentalMatrix.h create mode 100644 gtsam/geometry/tests/testFundamentalMatrix.cpp diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h new file mode 100644 index 000000000..ce2e1e48f --- /dev/null +++ b/gtsam/geometry/FundamentalMatrix.h @@ -0,0 +1,83 @@ +/* + * @file FundamentalMatrix.h + * @brief FundamentalMatrix class + * @author Frank Dellaert + * @date Oct 23, 2024 + */ + +#pragma once + +#include +#include + +namespace gtsam { + +class FundamentalMatrix { + private: + Rot3 U_; ///< Left rotation + double s_; ///< Scalar parameter for S + Rot3 V_; ///< Right rotation + + public: + /// Default constructor + FundamentalMatrix() : U_(Rot3()), s_(1.0), V_(Rot3()) {} + + /// Construct from U, V, and scalar s + FundamentalMatrix(const Rot3& U, double s, const Rot3& V) + : U_(U), s_(s), V_(V) {} + + /// Return the fundamental matrix representation + Matrix3 matrix() const { + return U_.matrix() * Vector3(1, s_, 1).asDiagonal() * + V_.transpose().matrix(); + } + + /// @name Testable + /// @{ + + /// Print the FundamentalMatrix + void print(const std::string& s = "") const { + std::cout << s << "U:\n" + << U_.matrix() << "\ns: " << s_ << "\nV:\n" + << V_.matrix() << std::endl; + } + + /// Check if the FundamentalMatrix is equal to another within a tolerance + bool equals(const FundamentalMatrix& other, double tol = 1e-9) const { + return U_.equals(other.U_, tol) && std::abs(s_ - other.s_) < tol && + V_.equals(other.V_, tol); + } + + /// @} + + /// @name Manifold + /// @{ + enum { dimension = 7 }; // 3 for U, 1 for s, 3 for V + inline static size_t Dim() { return dimension; } + inline size_t dim() const { return dimension; } + + /// Return local coordinates with respect to another FundamentalMatrix + Vector localCoordinates(const FundamentalMatrix& F) const { + Vector result(7); + result.head<3>() = U_.localCoordinates(F.U_); + result(3) = F.s_ - s_; // Difference in 
scalar + result.tail<3>() = V_.localCoordinates(F.V_); + return result; + } + + /// Retract the given vector to get a new FundamentalMatrix + FundamentalMatrix retract(const Vector& delta) const { + Rot3 newU = U_.retract(delta.head<3>()); + double newS = s_ + delta(3); // Update scalar + Rot3 newV = V_.retract(delta.tail<3>()); + return FundamentalMatrix(newU, newS, newV); + } + + /// @} +}; + +template <> +struct traits + : public internal::Manifold {}; + +} // namespace gtsam diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp new file mode 100644 index 000000000..40b40c582 --- /dev/null +++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp @@ -0,0 +1,56 @@ +/* + * @file testFundamentalMatrix.cpp + * @brief Test FundamentalMatrix class + * @author Frank Dellaert + * @date October 23, 2024 + */ + +#include +#include +#include +#include +#include +#include + +using namespace std::placeholders; +using namespace std; +using namespace gtsam; + +GTSAM_CONCEPT_TESTABLE_INST(FundamentalMatrix) +GTSAM_CONCEPT_MANIFOLD_INST(FundamentalMatrix) + +//************************************************************************* +// Create two rotations and corresponding fundamental matrix F +Rot3 trueU = Rot3::Yaw(M_PI_2); +Rot3 trueV = Rot3::Yaw(M_PI_4); +double trueS = 0.5; +FundamentalMatrix trueF(trueU, trueS, trueV); + +//************************************************************************* +TEST(FundamentalMatrix, localCoordinates) { + Vector expected = Z_7x1; // Assuming 7 dimensions for U, V, and s + Vector actual = trueF.localCoordinates(trueF); + EXPECT(assert_equal(expected, actual, 1e-8)); +} + +//************************************************************************* +TEST(FundamentalMatrix, retract) { + FundamentalMatrix actual = trueF.retract(Z_7x1); + EXPECT(assert_equal(trueF, actual)); +} + +//************************************************************************* +TEST(FundamentalMatrix, RoundTrip) { + Vector7 d; + d << 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7; + FundamentalMatrix hx = trueF.retract(d); + Vector actual = trueF.localCoordinates(hx); + EXPECT(assert_equal(d, actual, 1e-8)); +} + +//************************************************************************* +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ From f104951494197212b600f8af6417188483fe8380 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 15:56:22 -0700 Subject: [PATCH 166/204] Simple F version --- gtsam/geometry/FundamentalMatrix.h | 87 ++++++++++++++++++- .../geometry/tests/testFundamentalMatrix.cpp | 34 +++++++- 2 files changed, 119 insertions(+), 2 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h index ce2e1e48f..b76f95686 100644 --- a/gtsam/geometry/FundamentalMatrix.h +++ b/gtsam/geometry/FundamentalMatrix.h @@ -1,12 +1,13 @@ /* * @file FundamentalMatrix.h - * @brief FundamentalMatrix class + * @brief FundamentalMatrix classes * @author Frank Dellaert * @date Oct 23, 2024 */ #pragma once +#include #include #include @@ -76,8 +77,92 @@ class FundamentalMatrix { /// @} }; +/** + * @class SimpleFundamentalMatrix + * @brief Class for representing a simple fundamental matrix. + * + * This class represents a simple fundamental matrix, which is a + * parameterization of the essential matrix and focal lengths for left and right + * cameras. 
Principal points are not part of the manifold but a convenience. + */ +class SimpleFundamentalMatrix { + private: + EssentialMatrix E_; ///< Essential matrix + double fa_; ///< Focal length for left camera + double fb_; ///< Focal length for right camera + Point2 ca_; ///< Principal point for left camera + Point2 cb_; ///< Principal point for right camera + + public: + /// Default constructor + SimpleFundamentalMatrix() + : E_(), fa_(1.0), fb_(1.0), ca_(0.0, 0.0), cb_(0.0, 0.0) {} + + /// Construct from essential matrix and focal lengths + SimpleFundamentalMatrix(const EssentialMatrix& E, // + double fa, double fb, + const Point2& ca = Point2(0.0, 0.0), + const Point2& cb = Point2(0.0, 0.0)) + : E_(E), fa_(fa), fb_(fb), ca_(ca), cb_(cb) {} + + /// Return the fundamental matrix representation + Matrix3 matrix() const { + Matrix3 Ka, Kb; + Ka << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1; // Left calibration + Kb << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1; // Right calibration + return Ka * E_.matrix() * Kb.inverse(); + } + + /// @name Testable + /// @{ + + /// Print the SimpleFundamentalMatrix + void print(const std::string& s = "") const { + std::cout << s << "E:\n" + << E_.matrix() << "\nfa: " << fa_ << "\nfb: " << fb_ + << "\nca: " << ca_ << "\ncb: " << cb_ << std::endl; + } + + /// Check equality within a tolerance + bool equals(const SimpleFundamentalMatrix& other, double tol = 1e-9) const { + return E_.equals(other.E_, tol) && std::abs(fa_ - other.fa_) < tol && + std::abs(fb_ - other.fb_) < tol && (ca_ - other.ca_).norm() < tol && + (cb_ - other.cb_).norm() < tol; + } + /// @} + + /// @name Manifold + /// @{ + enum { dimension = 7 }; // 5 for E, 1 for fa, 1 for fb + inline static size_t Dim() { return dimension; } + inline size_t dim() const { return dimension; } + + /// Return local coordinates with respect to another + /// SimpleFundamentalMatrix + Vector localCoordinates(const SimpleFundamentalMatrix& F) const { + Vector result(7); + result.head<5>() = E_.localCoordinates(F.E_); + result(5) = F.fa_ - fa_; // Difference in fa + result(6) = F.fb_ - fb_; // Difference in fb + return result; + } + + /// Retract the given vector to get a new SimpleFundamentalMatrix + SimpleFundamentalMatrix retract(const Vector& delta) const { + EssentialMatrix newE = E_.retract(delta.head<5>()); + double newFa = fa_ + delta(5); // Update fa + double newFb = fb_ + delta(6); // Update fb + return SimpleFundamentalMatrix(newE, newFa, newFb, ca_, cb_); + } + /// @} +}; + template <> struct traits : public internal::Manifold {}; +template <> +struct traits + : public internal::Manifold {}; + } // namespace gtsam diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp index 40b40c582..c69edaec1 100644 --- a/gtsam/geometry/tests/testFundamentalMatrix.cpp +++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp @@ -1,6 +1,6 @@ /* * @file testFundamentalMatrix.cpp - * @brief Test FundamentalMatrix class + * @brief Test FundamentalMatrix classes * @author Frank Dellaert * @date October 23, 2024 */ @@ -48,6 +48,38 @@ TEST(FundamentalMatrix, RoundTrip) { EXPECT(assert_equal(d, actual, 1e-8)); } +//************************************************************************* +// Create essential matrix and focal lengths for +// SimpleFundamentalMatrix +EssentialMatrix trueE; // Assuming a default constructor is available +double trueFa = 1.0; +double trueFb = 1.0; +Point2 trueCa(0.0, 0.0); +Point2 trueCb(0.0, 0.0); +SimpleFundamentalMatrix trueSimpleF(trueE, trueFa, 
trueFb, trueCa, trueCb); + +//************************************************************************* +TEST(SimpleFundamentalMatrix, localCoordinates) { + Vector expected = Z_7x1; + Vector actual = trueSimpleF.localCoordinates(trueSimpleF); + EXPECT(assert_equal(expected, actual, 1e-8)); +} + +//************************************************************************* +TEST(SimpleFundamentalMatrix, retract) { + SimpleFundamentalMatrix actual = trueSimpleF.retract(Z_9x1); + EXPECT(assert_equal(trueSimpleF, actual)); +} + +//************************************************************************* +TEST(SimpleFundamentalMatrix, RoundTrip) { + Vector7 d; + d << 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7; + SimpleFundamentalMatrix hx = trueSimpleF.retract(d); + Vector actual = trueSimpleF.localCoordinates(hx); + EXPECT(assert_equal(d, actual, 1e-8)); +} + //************************************************************************* int main() { TestResult tr; From 76175feb9551244b7229269a7b8599bc1020346d Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 16:01:28 -0700 Subject: [PATCH 167/204] Base class --- gtsam/geometry/FundamentalMatrix.h | 68 ++++++++++++++----- .../geometry/tests/testFundamentalMatrix.cpp | 16 ++--- 2 files changed, 60 insertions(+), 24 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h index b76f95686..8140e3921 100644 --- a/gtsam/geometry/FundamentalMatrix.h +++ b/gtsam/geometry/FundamentalMatrix.h @@ -13,7 +13,33 @@ namespace gtsam { +/** + * @brief Abstract base class for FundamentalMatrix + * + * This class provides a common interface for all types of fundamental matrices. + * It declares a virtual function `matrix()` that must be implemented by derived + * classes. The `matrix()` function returns a 3x3 matrix representation of the + * fundamental matrix. + */ class FundamentalMatrix { + public: + /** + * @brief Returns a 3x3 matrix representation of the fundamental matrix + * + * @return A 3x3 matrix representing the fundamental matrix + */ + virtual Matrix3 matrix() const = 0; +}; + +/** + * @class GeneralFundamentalMatrix + * @brief Represents a general fundamental matrix. + * + * This class represents a general fundamental matrix, which is a 3x3 matrix + * that describes the relationship between two images. It is parameterized by a + * left rotation U, a scalar s, and a right rotation V. + */ +class GeneralFundamentalMatrix : public FundamentalMatrix { private: Rot3 U_; ///< Left rotation double s_; ///< Scalar parameter for S @@ -21,14 +47,23 @@ class FundamentalMatrix { public: /// Default constructor - FundamentalMatrix() : U_(Rot3()), s_(1.0), V_(Rot3()) {} + GeneralFundamentalMatrix() : U_(Rot3()), s_(1.0), V_(Rot3()) {} - /// Construct from U, V, and scalar s - FundamentalMatrix(const Rot3& U, double s, const Rot3& V) + /** + * @brief Construct from U, V, and scalar s + * + * Initializes the GeneralFundamentalMatrix with the given left rotation U, + * scalar s, and right rotation V. 
+ * + * @param U Left rotation matrix + * @param s Scalar parameter for the fundamental matrix + * @param V Right rotation matrix + */ + GeneralFundamentalMatrix(const Rot3& U, double s, const Rot3& V) : U_(U), s_(s), V_(V) {} /// Return the fundamental matrix representation - Matrix3 matrix() const { + Matrix3 matrix() const override { return U_.matrix() * Vector3(1, s_, 1).asDiagonal() * V_.transpose().matrix(); } @@ -36,15 +71,16 @@ class FundamentalMatrix { /// @name Testable /// @{ - /// Print the FundamentalMatrix + /// Print the GeneralFundamentalMatrix void print(const std::string& s = "") const { std::cout << s << "U:\n" << U_.matrix() << "\ns: " << s_ << "\nV:\n" << V_.matrix() << std::endl; } - /// Check if the FundamentalMatrix is equal to another within a tolerance - bool equals(const FundamentalMatrix& other, double tol = 1e-9) const { + /// Check if the GeneralFundamentalMatrix is equal to another within a + /// tolerance + bool equals(const GeneralFundamentalMatrix& other, double tol = 1e-9) const { return U_.equals(other.U_, tol) && std::abs(s_ - other.s_) < tol && V_.equals(other.V_, tol); } @@ -57,8 +93,8 @@ class FundamentalMatrix { inline static size_t Dim() { return dimension; } inline size_t dim() const { return dimension; } - /// Return local coordinates with respect to another FundamentalMatrix - Vector localCoordinates(const FundamentalMatrix& F) const { + /// Return local coordinates with respect to another GeneralFundamentalMatrix + Vector localCoordinates(const GeneralFundamentalMatrix& F) const { Vector result(7); result.head<3>() = U_.localCoordinates(F.U_); result(3) = F.s_ - s_; // Difference in scalar @@ -66,12 +102,12 @@ class FundamentalMatrix { return result; } - /// Retract the given vector to get a new FundamentalMatrix - FundamentalMatrix retract(const Vector& delta) const { + /// Retract the given vector to get a new GeneralFundamentalMatrix + GeneralFundamentalMatrix retract(const Vector& delta) const { Rot3 newU = U_.retract(delta.head<3>()); double newS = s_ + delta(3); // Update scalar Rot3 newV = V_.retract(delta.tail<3>()); - return FundamentalMatrix(newU, newS, newV); + return GeneralFundamentalMatrix(newU, newS, newV); } /// @} @@ -85,7 +121,7 @@ class FundamentalMatrix { * parameterization of the essential matrix and focal lengths for left and right * cameras. Principal points are not part of the manifold but a convenience. 
*/ -class SimpleFundamentalMatrix { +class SimpleFundamentalMatrix : FundamentalMatrix { private: EssentialMatrix E_; ///< Essential matrix double fa_; ///< Focal length for left camera @@ -106,7 +142,7 @@ class SimpleFundamentalMatrix { : E_(E), fa_(fa), fb_(fb), ca_(ca), cb_(cb) {} /// Return the fundamental matrix representation - Matrix3 matrix() const { + Matrix3 matrix() const override { Matrix3 Ka, Kb; Ka << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1; // Left calibration Kb << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1; // Right calibration @@ -158,8 +194,8 @@ class SimpleFundamentalMatrix { }; template <> -struct traits - : public internal::Manifold {}; +struct traits + : public internal::Manifold {}; template <> struct traits diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp index c69edaec1..0c132361c 100644 --- a/gtsam/geometry/tests/testFundamentalMatrix.cpp +++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp @@ -16,34 +16,34 @@ using namespace std::placeholders; using namespace std; using namespace gtsam; -GTSAM_CONCEPT_TESTABLE_INST(FundamentalMatrix) -GTSAM_CONCEPT_MANIFOLD_INST(FundamentalMatrix) +GTSAM_CONCEPT_TESTABLE_INST(GeneralFundamentalMatrix) +GTSAM_CONCEPT_MANIFOLD_INST(GeneralFundamentalMatrix) //************************************************************************* // Create two rotations and corresponding fundamental matrix F Rot3 trueU = Rot3::Yaw(M_PI_2); Rot3 trueV = Rot3::Yaw(M_PI_4); double trueS = 0.5; -FundamentalMatrix trueF(trueU, trueS, trueV); +GeneralFundamentalMatrix trueF(trueU, trueS, trueV); //************************************************************************* -TEST(FundamentalMatrix, localCoordinates) { +TEST(GeneralFundamentalMatrix, localCoordinates) { Vector expected = Z_7x1; // Assuming 7 dimensions for U, V, and s Vector actual = trueF.localCoordinates(trueF); EXPECT(assert_equal(expected, actual, 1e-8)); } //************************************************************************* -TEST(FundamentalMatrix, retract) { - FundamentalMatrix actual = trueF.retract(Z_7x1); +TEST(GeneralFundamentalMatrix, retract) { + GeneralFundamentalMatrix actual = trueF.retract(Z_7x1); EXPECT(assert_equal(trueF, actual)); } //************************************************************************* -TEST(FundamentalMatrix, RoundTrip) { +TEST(GeneralFundamentalMatrix, RoundTrip) { Vector7 d; d << 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7; - FundamentalMatrix hx = trueF.retract(d); + GeneralFundamentalMatrix hx = trueF.retract(d); Vector actual = trueF.localCoordinates(hx); EXPECT(assert_equal(d, actual, 1e-8)); } From 32980f3e09d0d5c18df073b5c6d0fdda30a97f6c Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 18:04:06 -0700 Subject: [PATCH 168/204] Conversion --- gtsam/geometry/FundamentalMatrix.h | 73 +++++++++-- .../geometry/tests/testFundamentalMatrix.cpp | 117 +++++++++++++++--- 2 files changed, 167 insertions(+), 23 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h index 8140e3921..1080cd87a 100644 --- a/gtsam/geometry/FundamentalMatrix.h +++ b/gtsam/geometry/FundamentalMatrix.h @@ -62,9 +62,58 @@ class GeneralFundamentalMatrix : public FundamentalMatrix { GeneralFundamentalMatrix(const Rot3& U, double s, const Rot3& V) : U_(U), s_(s), V_(V) {} + /** + * @brief Construct from a 3x3 matrix using SVD + * + * Initializes the GeneralFundamentalMatrix by performing SVD on the given + * matrix and ensuring U and V are not reflections. 
+   *
+   * @param F A 3x3 matrix representing the fundamental matrix
+   */
+  GeneralFundamentalMatrix(const Matrix3& F) {
+    // Perform SVD
+    Eigen::JacobiSVD<Matrix3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+
+    // Extract U and V
+    Matrix3 U = svd.matrixU();
+    Matrix3 V = svd.matrixV();
+    Vector3 singularValues = svd.singularValues();
+
+    // Scale the singular values
+    double scale = singularValues(0);
+    if (scale != 0) {
+      singularValues /= scale;  // Normalize the first singular value to 1.0
+    }
+
+    // Check if the third singular value is close to zero (valid F condition)
+    if (std::abs(singularValues(2)) > 1e-9) {
+      throw std::invalid_argument(
+          "The input matrix does not represent a valid fundamental matrix.");
+    }
+
+    // Ensure the second singular value is recorded as s
+    s_ = singularValues(1);
+
+    // Check if U is a reflection
+    if (U.determinant() < 0) {
+      U = -U;
+      s_ = -s_;  // Change sign of scalar if U is a reflection
+    }
+
+    // Check if V is a reflection
+    if (V.determinant() < 0) {
+      V = -V;
+      s_ = -s_;  // Change sign of scalar if V is a reflection
+    }
+
+    // Assign the rotations
+    U_ = Rot3(U);
+    V_ = Rot3(V);
+  }
+
   /// Return the fundamental matrix representation
   Matrix3 matrix() const override {
-    return U_.matrix() * Vector3(1, s_, 1).asDiagonal() *
+    return U_.matrix() * Vector3(1, s_, 0).asDiagonal() *
            V_.transpose().matrix();
   }
@@ -141,14 +190,24 @@ class SimpleFundamentalMatrix {
                           const Point2& cb = Point2(0.0, 0.0))
       : E_(E), fa_(fa), fb_(fb), ca_(ca), cb_(cb) {}
 
-  /// Return the fundamental matrix representation
-  Matrix3 matrix() const override {
-    Matrix3 Ka, Kb;
-    Ka << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1;  // Left calibration
-    Kb << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1;  // Right calibration
-    return Ka * E_.matrix() * Kb.inverse();
+  /// Return the left calibration matrix
+  Matrix3 leftK() const {
+    Matrix3 K;
+    K << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1;
+    return K;
   }
 
+  /// Return the right calibration matrix
+  Matrix3 rightK() const {
+    Matrix3 K;
+    K << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1;
+    return K;
+  }
+
+  /// Return the fundamental matrix representation
+  Matrix3 matrix() const override {
+    return leftK().transpose().inverse() * E_.matrix() * rightK().inverse();
+  }
   /// @name Testable
   /// @{
diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index 0c132361c..bab57b148 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -49,37 +49,122 @@ TEST(GeneralFundamentalMatrix, RoundTrip) {
 }
 
 //*************************************************************************
-// Create essential matrix and focal lengths for
-// SimpleFundamentalMatrix
-EssentialMatrix trueE;  // Assuming a default constructor is available
-double trueFa = 1.0;
-double trueFb = 1.0;
-Point2 trueCa(0.0, 0.0);
-Point2 trueCb(0.0, 0.0);
-SimpleFundamentalMatrix trueSimpleF(trueE, trueFa, trueFb, trueCa, trueCb);
+// Create the simplest SimpleFundamentalMatrix, a stereo pair
+EssentialMatrix defaultE(Rot3(), Unit3(1, 0, 0));
+Point2 zero(0.0, 0.0);
+SimpleFundamentalMatrix stereoF(defaultE, 1.0, 1.0, zero, zero);
 
 //*************************************************************************
+TEST(SimpleStereo, Conversion) {
+  GeneralFundamentalMatrix convertedF(stereoF.matrix());
+  EXPECT(assert_equal(stereoF.matrix(), convertedF.matrix(), 1e-8));
+}
+
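The Conversion test above relies on the factorization implemented by the new Matrix3 constructor: any valid fundamental matrix can be written as F = U * diag(1, s, 0) * Vᵀ once its largest singular value is normalized to one. Because of that normalization, a reconstructed F matches the input only up to scale; the unit-focal stereo case above happens to have scale one, so the comparison there is exact, while the pixel-coordinate tests below first rescale:

  // Up-to-scale comparison used by the pixel tests that follow:
  //   actual *= expected(1, 2) / actual(1, 2);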
+//*************************************************************************
+TEST(SimpleStereo, localCoordinates) {
   Vector expected = Z_7x1;
-  Vector actual = trueSimpleF.localCoordinates(trueSimpleF);
+  Vector actual = stereoF.localCoordinates(stereoF);
   EXPECT(assert_equal(expected, actual, 1e-8));
 }
 
 //*************************************************************************
-TEST(SimpleFundamentalMatrix, retract) {
-  SimpleFundamentalMatrix actual = trueSimpleF.retract(Z_9x1);
-  EXPECT(assert_equal(trueSimpleF, actual));
+TEST(SimpleStereo, retract) {
+  SimpleFundamentalMatrix actual = stereoF.retract(Z_9x1);
+  EXPECT(assert_equal(stereoF, actual));
 }
 
 //*************************************************************************
-TEST(SimpleFundamentalMatrix, RoundTrip) {
+TEST(SimpleStereo, RoundTrip) {
   Vector7 d;
   d << 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7;
-  SimpleFundamentalMatrix hx = trueSimpleF.retract(d);
-  Vector actual = trueSimpleF.localCoordinates(hx);
+  SimpleFundamentalMatrix hx = stereoF.retract(d);
+  Vector actual = stereoF.localCoordinates(hx);
   EXPECT(assert_equal(d, actual, 1e-8));
 }
 
+//*************************************************************************
+TEST(SimpleStereo, EpipolarLine) {
+  // Create a point in b
+  Point3 p_b(0, 2, 1);
+  // Convert the point to a horizontal line in a
+  Vector3 l_a = stereoF.matrix() * p_b;
+  // Check if the line is horizontal at height 2
+  EXPECT(assert_equal(Vector3(0, -1, 2), l_a));
+}
+
+//*************************************************************************
+// Create a stereo pair, but in pixels not normalized coordinates.
+// We're still using zero principal points here.
+double fa = 1000;
+double fb = 1000;
+SimpleFundamentalMatrix pixelStereo(defaultE, fa, fb, zero, zero);
+
+//*************************************************************************
+TEST(PixelStereo, Conversion) {
+  auto expected = pixelStereo.matrix();
+
+  GeneralFundamentalMatrix convertedF(pixelStereo.matrix());
+
+  // Check equality of F-matrices up to a scale
+  auto actual = convertedF.matrix();
+  actual *= expected(1, 2) / actual(1, 2);
+  EXPECT(assert_equal(expected, actual, 1e-5));
+}
+
+//*************************************************************************
+TEST(PixelStereo, PointInBToHorizontalLineInA) {
+  // Create a point in b
+  Point3 p_b = Point3(0, 300, 1);
+  // Convert the point to a horizontal line in a
+  Vector3 l_a = pixelStereo.matrix() * p_b;
+  // Check if the line is horizontal at height 300
+  EXPECT(assert_equal(Vector3(0, -0.001, 0.3), l_a));
+}
+
+//*************************************************************************
+// Create a stereo pair with the right camera rotated 90 degrees
+Rot3 aRb = Rot3::Rz(M_PI_2);  // Rotate 90 degrees around the Z-axis
+EssentialMatrix rotatedE(aRb, Unit3(1, 0, 0));
+SimpleFundamentalMatrix rotatedPixelStereo(rotatedE, fa, fb, zero, zero);
+
+//*************************************************************************
+TEST(RotatedPixelStereo, Conversion) {
+  auto expected = rotatedPixelStereo.matrix();
+
+  GeneralFundamentalMatrix convertedF(rotatedPixelStereo.matrix());
+
+  // Check equality of F-matrices up to a scale
+  auto actual = convertedF.matrix();
+  actual *= expected(1, 2) / actual(1, 2);
+  EXPECT(assert_equal(expected, actual, 1e-4));
+}
+
+//*************************************************************************
+TEST(RotatedPixelStereo, PointInBToHorizontalLineInA) {
+  // Create a point in b
+  Point3 p_b = Point3(300, 0, 1);
+  // Convert the point to a horizontal line in a
+  Vector3 l_a = rotatedPixelStereo.matrix() * p_b;
+  // Check if the line is horizontal at height 300
+  EXPECT(assert_equal(Vector3(0, -0.001, 0.3), l_a));
+}
+
+//*************************************************************************
+// Now check that principal points also survive conversion
+SimpleFundamentalMatrix stereoWithPrincipalPoints(rotatedE, fa, fb, zero, zero);
+
+//*************************************************************************
+TEST(stereoWithPrincipalPoints, Conversion) {
+  auto expected = stereoWithPrincipalPoints.matrix();
+
+  GeneralFundamentalMatrix convertedF(stereoWithPrincipalPoints.matrix());
+
+  // Check equality of F-matrices up to a scale
+  auto actual = convertedF.matrix();
+  actual *= expected(1, 2) / actual(1, 2);
+  EXPECT(assert_equal(expected, actual, 1e-4));
+}
+
 //*************************************************************************
 int main() {
   TestResult tr;

From 3036ed80677c0114c757792cd0639c6cdde41d25 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Wed, 23 Oct 2024 21:07:22 -0700
Subject: [PATCH 169/204] Verify triplet epipolar transfer

---
 gtsam/geometry/FundamentalMatrix.h            |  5 +-
 .../geometry/tests/testFundamentalMatrix.cpp  | 79 +++++++++++++++++--
 2 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index 1080cd87a..acd713020 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -213,9 +213,10 @@ class SimpleFundamentalMatrix : FundamentalMatrix {
 
   /// Print the SimpleFundamentalMatrix
   void print(const std::string& s = "") const {
-    std::cout << s << "E:\n"
+    std::cout << s << " E:\n"
               << E_.matrix() << "\nfa: " << fa_ << "\nfb: " << fb_
-              << "\nca: " << ca_ << "\ncb: " << cb_ << std::endl;
+              << "\nca: " << ca_.transpose() << "\ncb: " << cb_.transpose()
+              << std::endl;
   }
 
   /// Check equality within a tolerance
diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index bab57b148..40a0b4767 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 
 using namespace std::placeholders;
@@ -95,9 +96,9 @@ TEST(SimpleStereo, EpipolarLine) {
 //*************************************************************************
 // Create a stereo pair, but in pixels not normalized coordinates.
 // We're still using zero principal points here.
-double fa = 1000;
-double fb = 1000;
-SimpleFundamentalMatrix pixelStereo(defaultE, fa, fb, zero, zero);
+double focalLength = 1000;
+SimpleFundamentalMatrix pixelStereo(defaultE, focalLength, focalLength, zero,
+                                    zero);
 
 //*************************************************************************
 TEST(PixelStereo, Conversion) {
@@ -125,7 +126,8 @@ TEST(PixelStereo, PointInBToHorizontalLineInA) {
 // Create a stereo pair with the right camera rotated 90 degrees
 Rot3 aRb = Rot3::Rz(M_PI_2);  // Rotate 90 degrees around the Z-axis
 EssentialMatrix rotatedE(aRb, Unit3(1, 0, 0));
-SimpleFundamentalMatrix rotatedPixelStereo(rotatedE, fa, fb, zero, zero);
+SimpleFundamentalMatrix rotatedPixelStereo(rotatedE, focalLength, focalLength,
+                                           zero, zero);
 
 //*************************************************************************
 TEST(RotatedPixelStereo, Conversion) {
@@ -151,7 +153,10 @@ TEST(RotatedPixelStereo, PointInBToHorizontalLineInA) {
 
 //*************************************************************************
 // Now check that principal points also survive conversion
-SimpleFundamentalMatrix stereoWithPrincipalPoints(rotatedE, fa, fb, zero, zero);
+Point2 principalPoint(640 / 2, 480 / 2);
+SimpleFundamentalMatrix stereoWithPrincipalPoints(rotatedE, focalLength,
+                                                  focalLength, principalPoint,
+                                                  principalPoint);
 
 //*************************************************************************
 TEST(stereoWithPrincipalPoints, Conversion) {
@@ -165,9 +170,71 @@ TEST(stereoWithPrincipalPoints, Conversion) {
   EXPECT(assert_equal(expected, actual, 1e-4));
 }
 
+//*************************************************************************
+std::array<Pose3, 3> generateCameraPoses() {
+  std::array<Pose3, 3> cameraPoses;
+  const double radius = 1.0;
+  for (int i = 0; i < 3; ++i) {
+    double angle = i * 2.0 * M_PI / 3.0;
+    double c = cos(angle), s = sin(angle);
+    Rot3 aRb({-s, c, 0}, {0, 0, -1}, {-c, -s, 0});
+    cameraPoses[i] = {aRb, Point3(radius * c, radius * s, 0)};
+  }
+  return cameraPoses;
+}
+
+std::tuple<SimpleFundamentalMatrix, SimpleFundamentalMatrix,
+           SimpleFundamentalMatrix>
+generateFs(const std::array<Pose3, 3> &cameraPoses) {
+  std::array<SimpleFundamentalMatrix, 3> F;
+  for (size_t i = 0; i < 3; ++i) {
+    size_t j = (i + 1) % 3;
+    const Pose3 iPj = cameraPoses[i].between(cameraPoses[j]);
+    EssentialMatrix E(iPj.rotation(), Unit3(iPj.translation()));
+    F[i] = {E, focalLength, focalLength, principalPoint, principalPoint};
+  }
+  return {F[0], F[1], F[2]};
+}
+
+//*************************************************************************
+TEST(Triplet, Transfer) {
+  // Generate cameras on a circle, as well as fundamental matrices
+  auto cameraPoses = generateCameraPoses();
+  auto [F01, F12, F20] = generateFs(cameraPoses);
+
+  // Check that they are all equal
+  EXPECT(F01.equals(F12, 1e-9));
+  EXPECT(F12.equals(F20, 1e-9));
+  EXPECT(F20.equals(F01, 1e-9));
+
+  // Now project a point into the three cameras
+  const Point3 P(0.1, 0.2, 0.3);
+  const Cal3_S2 K(focalLength, focalLength, 0.0,  //
+                  principalPoint.x(), principalPoint.y());
+
+  std::array<Point2, 3> p;
+  for (size_t i = 0; i < 3; ++i) {
+    // Project the point into each camera
+    PinholeCameraCal3_S2 camera(cameraPoses[i], K);
+    p[i] = camera.project(P);
+  }
+
+  // Create lines in camera 0 from projections 1 and 2
+  Vector3 line1 = F01.matrix() * Vector3(p[1].x(), p[1].y(), 1);
+  Vector3 line2 = F20.matrix().transpose() * Vector3(p[2].x(), p[2].y(), 1);
+
+  // Cross the lines to find the intersection point
+  Vector3 intersectionPoint = line1.cross(line2);
+
+  // Normalize the intersection point
+  intersectionPoint /= intersectionPoint(2);
+
+  // Compare the intersection point with the original projection in camera 0
+  EXPECT(assert_equal(p[0], intersectionPoint.head<2>(), 1e-9));
+}
 //*************************************************************************
 int main() {
   TestResult tr;

From 22c6b854f7e32bea59d6fac36b7f6997f282dc90 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Wed, 23 Oct 2024 22:56:07 -0700
Subject: [PATCH 170/204] TripleF and transfer

---
 gtsam/geometry/FundamentalMatrix.h            | 60 +++++++++++++++++++
 .../geometry/tests/testFundamentalMatrix.cpp  | 37 +++++-------
 2 files changed, 76 insertions(+), 21 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index acd713020..32acb3be0 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -29,6 +29,66 @@ class FundamentalMatrix {
    * @return A 3x3 matrix representing the fundamental matrix
    */
   virtual Matrix3 matrix() const = 0;
+
+  /**
+   * @brief Virtual destructor to ensure proper cleanup of derived classes
+   */
+  virtual ~FundamentalMatrix() {}
+
+  /**
+   * @brief Transfer projections from cameras 1 and 2 to camera 0
+   *
+   * Take two fundamental matrices F01 and F02, and two points p1 and p2, and
+   * returns the 2D point in camera 0 where the epipolar lines intersect.
+   */
+  static Point2 transfer(const Matrix3& F01, const Point2& p1,
+                         const Matrix3& F02, const Point2& p2) {
+    // Create lines in camera 0 from projections of the other two cameras
+    Vector3 line1 = F01 * Vector3(p1.x(), p1.y(), 1);
+    Vector3 line2 = F02 * Vector3(p2.x(), p2.y(), 1);
+
+    // Cross the lines to find the intersection point
+    Vector3 intersectionPoint = line1.cross(line2);
+
+    // Normalize the intersection point
+    intersectionPoint /= intersectionPoint(2);
+
+    return intersectionPoint.head<2>();  // Return the 2D point
+  }
+};
+
+/// Represents a set of three fundamental matrices for transferring points
+/// between three cameras.
+template <typename F>
+struct TripleF {
+  F F01, F12, F20;
+
+  /// Transfers a point from two cameras to another.
+  template <size_t Index>
+  Point2 transfer(const Point2& point1, const Point2& point2) {
+    static_assert(Index < 3, "Index must be less than 3");
+  }
+
+  /// Specialization for transferring a point from cameras 1,2 to camera 0.
+  template <>
+  Point2 transfer<0>(const Point2& p1, const Point2& p2) {
+    return FundamentalMatrix::transfer(F01.matrix(), p1,
+                                       F20.matrix().transpose(), p2);
+  }
+
+  /// Specialization for transferring a point from camera 0,2 to camera 1.
+  template <>
+  Point2 transfer<1>(const Point2& p0, const Point2& p2) {
+    return FundamentalMatrix::transfer(F01.matrix().transpose(), p0,
+                                       F12.matrix(), p2);
+  }
+
+  /// Specialization for transferring a point from camera 0,1 to camera 2.
+  template <>
+  Point2 transfer<2>(const Point2& p0, const Point2& p1) {
+    return FundamentalMatrix::transfer(F01.matrix(), p0,
+                                       F12.matrix().transpose(), p1);
+  }
 };
 
 /**
diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index 40a0b4767..f5a578911 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -171,6 +171,7 @@ TEST(stereoWithPrincipalPoints, Conversion) {
 }
 
 //*************************************************************************
+/// Generate three cameras on a circle, looking inwards
 std::array<Pose3, 3> generateCameraPoses() {
   std::array<Pose3, 3> cameraPoses;
   const double radius = 1.0;
@@ -183,9 +184,10 @@ std::array<Pose3, 3> generateCameraPoses() {
   return cameraPoses;
 }
 
-std::tuple<SimpleFundamentalMatrix, SimpleFundamentalMatrix,
-           SimpleFundamentalMatrix>
-generateFs(const std::array<Pose3, 3> &cameraPoses) {
+//*************************************************************************
+/// Function to generate a TripleF from camera poses
+TripleF<SimpleFundamentalMatrix> generateTripleF(
+    const std::array<Pose3, 3>& cameraPoses) {
   std::array<SimpleFundamentalMatrix, 3> F;
   for (size_t i = 0; i < 3; ++i) {
     size_t j = (i + 1) % 3;
@@ -193,19 +195,19 @@ generateFs(const std::array<Pose3, 3> &cameraPoses) {
     EssentialMatrix E(iPj.rotation(), Unit3(iPj.translation()));
     F[i] = {E, focalLength, focalLength, principalPoint, principalPoint};
   }
-  return {F[0], F[1], F[2]};
+  return {F[0], F[1], F[2]};  // Return a TripleF instance
 }
 
 //*************************************************************************
-TEST(Triplet, Transfer) {
+TEST(TripleF, Transfer) {
   // Generate cameras on a circle, as well as fundamental matrices
   auto cameraPoses = generateCameraPoses();
-  auto [F01, F12, F20] = generateFs(cameraPoses);
+  auto triplet = generateTripleF(cameraPoses);
 
   // Check that they are all equal
-  EXPECT(F01.equals(F12, 1e-9));
-  EXPECT(F12.equals(F20, 1e-9));
-  EXPECT(F20.equals(F01, 1e-9));
+  EXPECT(triplet.F01.equals(triplet.F12, 1e-9));
+  EXPECT(triplet.F12.equals(triplet.F20, 1e-9));
+  EXPECT(triplet.F20.equals(triplet.F01, 1e-9));
 
   // Now project a point into the three cameras
   const Point3 P(0.1, 0.2, 0.3);
@@ -219,19 +221,12 @@ TEST(Triplet, Transfer) {
     p[i] = camera.project(P);
   }
 
-  // Create lines in camera 0 from projections 1 and 2
-  Vector3 line1 = F01.matrix() * Vector3(p[1].x(), p[1].y(), 1);
-  Vector3 line2 = F20.matrix().transpose() * Vector3(p[2].x(), p[2].y(), 1);
-
-  // Cross the lines to find the intersection point
-  Vector3 intersectionPoint = line1.cross(line2);
-
-  // Normalize the intersection point
-  intersectionPoint /= intersectionPoint(2);
-
-  // Compare the intersection point with the original projection in camera 0
-  EXPECT(assert_equal(p[0], intersectionPoint.head<2>(), 1e-9));
+  // Check that transfer works
+  EXPECT(assert_equal(p[0], triplet.transfer<0>(p[1], p[2]), 1e-9));
+  EXPECT(assert_equal(p[1], triplet.transfer<1>(p[0], p[2]), 1e-9));
+  EXPECT(assert_equal(p[2], triplet.transfer<2>(p[0], p[1]), 1e-9));
 }
+
 //*************************************************************************
 int main() {
   TestResult tr;

From 984232defb6971fb1e6ad365c59ef6670f59de0c Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Wed, 23 Oct 2024 23:21:02 -0700
Subject: [PATCH 171/204] Kill templates

---
 gtsam/geometry/FundamentalMatrix.h            | 21 ++++++-------------
 .../geometry/tests/testFundamentalMatrix.cpp  |  6 +++---
 2 files changed, 9 insertions(+), 18 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index 32acb3be0..8e952c214 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -63,29 +63,20 @@ template <typename F>
 struct TripleF {
   F F01, F12, F20;
 
-  /// Transfers a point from two cameras to another.
-  template <size_t Index>
-  Point2 transfer(const Point2& point1, const Point2& point2) {
-    static_assert(Index < 3, "Index must be less than 3");
-  }
-
-  /// Specialization for transferring a point from cameras 1,2 to camera 0.
-  template <>
-  Point2 transfer<0>(const Point2& p1, const Point2& p2) {
+  /// Transfers a point from cameras 1,2 to camera 0.
+  Point2 transfer0(const Point2& p1, const Point2& p2) {
     return FundamentalMatrix::transfer(F01.matrix(), p1,
                                        F20.matrix().transpose(), p2);
   }
 
-  /// Specialization for transferring a point from camera 0,2 to camera 1.
-  template <>
-  Point2 transfer<1>(const Point2& p0, const Point2& p2) {
+  /// Transfers a point from camera 0,2 to camera 1.
+  Point2 transfer1(const Point2& p0, const Point2& p2) {
     return FundamentalMatrix::transfer(F01.matrix().transpose(), p0,
                                        F12.matrix(), p2);
   }
 
-  /// Specialization for transferring a point from camera 0,1 to camera 2.
-  template <>
-  Point2 transfer<2>(const Point2& p0, const Point2& p1) {
+  /// Transfers a point from camera 0,1 to camera 2.
+  Point2 transfer2(const Point2& p0, const Point2& p1) {
     return FundamentalMatrix::transfer(F01.matrix(), p0,
                                        F12.matrix().transpose(), p1);
   }
diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index f5a578911..760b3b99c 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -222,9 +222,9 @@ TEST(TripleF, Transfer) {
   }
 
   // Check that transfer works
-  EXPECT(assert_equal(p[0], triplet.transfer<0>(p[1], p[2]), 1e-9));
-  EXPECT(assert_equal(p[1], triplet.transfer<1>(p[0], p[2]), 1e-9));
-  EXPECT(assert_equal(p[2], triplet.transfer<2>(p[0], p[1]), 1e-9));
+  EXPECT(assert_equal(p[0], triplet.transfer0(p[1], p[2]), 1e-9));
+  EXPECT(assert_equal(p[1], triplet.transfer1(p[0], p[2]), 1e-9));
+  EXPECT(assert_equal(p[2], triplet.transfer2(p[0], p[1]), 1e-9));
 }
 
 //*************************************************************************

From 82224a611bf9082c8c278bfdf75f2f484943fbfd Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Wed, 23 Oct 2024 23:36:10 -0700
Subject: [PATCH 172/204] cpp file

---
 gtsam/geometry/FundamentalMatrix.cpp | 133 +++++++++++++++++++++++++++
 gtsam/geometry/FundamentalMatrix.h   | 126 ++++---------------------
 2 files changed, 149 insertions(+), 110 deletions(-)
 create mode 100644 gtsam/geometry/FundamentalMatrix.cpp

diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp
new file mode 100644
index 000000000..c25082fc0
--- /dev/null
+++ b/gtsam/geometry/FundamentalMatrix.cpp
@@ -0,0 +1,133 @@
+/*
+ * @file FundamentalMatrix.cpp
+ * @brief FundamentalMatrix classes
+ * @author Frank Dellaert
+ * @date Oct 23, 2024
+ */
+
+#include <gtsam/geometry/FundamentalMatrix.h>
+
+namespace gtsam {
+
+GeneralFundamentalMatrix::GeneralFundamentalMatrix(const Matrix3& F) {
+  // Perform SVD
+  Eigen::JacobiSVD<Matrix3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+
+  // Extract U and V
+  Matrix3 U = svd.matrixU();
+  Matrix3 V = svd.matrixV();
+  Vector3 singularValues = svd.singularValues();
+
+  // Scale the singular values
+  double scale = singularValues(0);
+  if (scale != 0) {
+    singularValues /= scale;  // Normalize the first singular value to 1.0
+  }
+
+  // Check if the third singular value is close to zero (valid F condition)
+  if (std::abs(singularValues(2)) > 1e-9) {
+    throw std::invalid_argument(
+        "The input matrix does not represent a valid fundamental matrix.");
+  }
+
+  // Ensure the second singular value is recorded as s
+  s_ = singularValues(1);
+
+  // Check if U is a reflection
+  if (U.determinant() < 0) {
+    U = -U;
+    s_ = -s_;  // Change sign of scalar if U is a reflection
+  }
+
+  // Check if V is a reflection
+  if (V.determinant() < 0) {
+    V = -V;
+    s_ = -s_;  // Change sign of scalar if V is a reflection
+  }
+
+  // Assign the rotations
+  U_ = Rot3(U);
+  V_ = Rot3(V);
+}
+
+Matrix3 GeneralFundamentalMatrix::matrix() const {
+  return U_.matrix() * Vector3(1, s_, 0).asDiagonal() * V_.transpose().matrix();
+}
+
+void GeneralFundamentalMatrix::print(const std::string& s) const {
+  std::cout << s << "U:\n"
+            << U_.matrix() << "\ns: " << s_ << "\nV:\n"
+            << V_.matrix() << std::endl;
+}
+
+bool GeneralFundamentalMatrix::equals(const GeneralFundamentalMatrix& other,
+                                      double tol) const {
+  return U_.equals(other.U_, tol) && std::abs(s_ - other.s_) < tol &&
+         V_.equals(other.V_, tol);
+}
+
+Vector GeneralFundamentalMatrix::localCoordinates(
+    const GeneralFundamentalMatrix& F) const {
+  Vector result(7);
+  result.head<3>() = U_.localCoordinates(F.U_);
+  result(3) = F.s_ - s_;  // Difference in scalar
+  result.tail<3>() = V_.localCoordinates(F.V_);
+  return result;
+}
+
+GeneralFundamentalMatrix GeneralFundamentalMatrix::retract(
+    const Vector& delta) const {
+  Rot3 newU = U_.retract(delta.head<3>());
+  double newS = s_ + delta(3);  // Update scalar
+  Rot3 newV = V_.retract(delta.tail<3>());
+  return GeneralFundamentalMatrix(newU, newS, newV);
+}
+
+Matrix3 SimpleFundamentalMatrix::leftK() const {
+  Matrix3 K;
+  K << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1;
+  return K;
+}
+
+Matrix3 SimpleFundamentalMatrix::rightK() const {
+  Matrix3 K;
+  K << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1;
+  return K;
+}
+
+Matrix3 SimpleFundamentalMatrix::matrix() const {
+  return leftK().transpose().inverse() * E_.matrix() * rightK().inverse();
+}
+
+void SimpleFundamentalMatrix::print(const std::string& s) const {
+  std::cout << s << " E:\n"
+            << E_.matrix() << "\nfa: " << fa_ << "\nfb: " << fb_
+            << "\nca: " << ca_.transpose() << "\ncb: " << cb_.transpose()
+            << std::endl;
+}
+
+bool SimpleFundamentalMatrix::equals(const SimpleFundamentalMatrix& other,
+                                     double tol) const {
+  return E_.equals(other.E_, tol) && std::abs(fa_ - other.fa_) < tol &&
+         std::abs(fb_ - other.fb_) < tol && (ca_ - other.ca_).norm() < tol &&
+         (cb_ - other.cb_).norm() < tol;
+}
+
+Vector SimpleFundamentalMatrix::localCoordinates(
+    const SimpleFundamentalMatrix& F) const {
+  Vector result(7);
+  result.head<5>() = E_.localCoordinates(F.E_);
+  result(5) = F.fa_ - fa_;  // Difference in fa
+  result(6) = F.fb_ - fb_;  // Difference in fb
+  return result;
+}
+
+SimpleFundamentalMatrix SimpleFundamentalMatrix::retract(
+    const Vector& delta) const {
+  EssentialMatrix newE = E_.retract(delta.head<5>());
+  double newFa = fa_ + delta(5);  // Update fa
+  double newFb = fb_ + delta(6);  // Update fb
+  return SimpleFundamentalMatrix(newE, newFa, newFb, ca_, cb_);
+}
+
+}  // namespace gtsam
diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index 8e952c214..322d47193 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -121,70 +121,19 @@ class GeneralFundamentalMatrix : public FundamentalMatrix {
    *
    * @param F A 3x3 matrix representing the fundamental matrix
    */
-  GeneralFundamentalMatrix(const Matrix3& F) {
-    // Perform SVD
-    Eigen::JacobiSVD<Matrix3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
-
-    // Extract U and V
-    Matrix3 U = svd.matrixU();
-    Matrix3 V = svd.matrixV();
-    Vector3 singularValues = svd.singularValues();
-
-    // Scale the singular values
-    double scale = singularValues(0);
-    if (scale != 0) {
-      singularValues /= scale;  // Normalize the first singular value to 1.0
-    }
-
-    // Check if the third singular value is close to zero (valid F condition)
-    if (std::abs(singularValues(2)) > 1e-9) {
-      throw std::invalid_argument(
-          "The input matrix does not represent a valid fundamental matrix.");
-    }
-
-    // Ensure the second singular value is recorded as s
-    s_ = singularValues(1);
-
-    // Check if U is a reflection
-    if (U.determinant() < 0) {
-      U = -U;
-      s_ = -s_;  // Change sign of scalar if U is a reflection
-    }
-
-    // Check if V is a reflection
-    if (V.determinant() < 0) {
-      V = -V;
-      s_ = -s_;  // Change sign of scalar if V is a reflection
-    }
-
-    // Assign the rotations
-    U_ = Rot3(U);
-    V_ = Rot3(V);
-  }
+  GeneralFundamentalMatrix(const Matrix3& F);
 
   /// Return the fundamental matrix representation
-  Matrix3 matrix() const override {
-    return U_.matrix() * Vector3(1, s_, 0).asDiagonal() *
-           V_.transpose().matrix();
-  }
+  Matrix3 matrix() const override;
 
   /// @name Testable
   /// @{
-  /// Print the GeneralFundamentalMatrix
-  void print(const std::string& s = "") const {
-    std::cout << s << "U:\n"
-              << U_.matrix() << "\ns: " << s_ << "\nV:\n"
-              << V_.matrix() << std::endl;
-  }
+  void print(const std::string& s = "") const;
 
   /// Check if the GeneralFundamentalMatrix is equal to another within a
   /// tolerance
-  bool equals(const GeneralFundamentalMatrix& other, double tol = 1e-9) const {
-    return U_.equals(other.U_, tol) && std::abs(s_ - other.s_) < tol &&
-           V_.equals(other.V_, tol);
-  }
-
+  bool equals(const GeneralFundamentalMatrix& other, double tol = 1e-9) const;
 
   /// @}
   /// @name Manifold
@@ -194,22 +143,10 @@ class GeneralFundamentalMatrix : public FundamentalMatrix {
   inline size_t dim() const { return dimension; }
 
   /// Return local coordinates with respect to another GeneralFundamentalMatrix
-  Vector localCoordinates(const GeneralFundamentalMatrix& F) const {
-    Vector result(7);
-    result.head<3>() = U_.localCoordinates(F.U_);
-    result(3) = F.s_ - s_;  // Difference in scalar
-    result.tail<3>() = V_.localCoordinates(F.V_);
-    return result;
-  }
+  Vector localCoordinates(const GeneralFundamentalMatrix& F) const;
 
   /// Retract the given vector to get a new GeneralFundamentalMatrix
-  GeneralFundamentalMatrix retract(const Vector& delta) const {
-    Rot3 newU = U_.retract(delta.head<3>());
-    double newS = s_ + delta(3);  // Update scalar
-    Rot3 newV = V_.retract(delta.tail<3>());
-    return GeneralFundamentalMatrix(newU, newS, newV);
-  }
-
+  GeneralFundamentalMatrix retract(const Vector& delta) const;
   /// @}
 };
 
@@ -221,7 +158,7 @@ class GeneralFundamentalMatrix : public FundamentalMatrix {
  * parameterization of the essential matrix and focal lengths for left and right
 * cameras. Principal points are not part of the manifold but a convenience.
*/ -class SimpleFundamentalMatrix : FundamentalMatrix { +class SimpleFundamentalMatrix : public FundamentalMatrix { private: EssentialMatrix E_; ///< Essential matrix double fa_; ///< Focal length for left camera @@ -242,40 +179,21 @@ class SimpleFundamentalMatrix : FundamentalMatrix { : E_(E), fa_(fa), fb_(fb), ca_(ca), cb_(cb) {} /// Return the left calibration matrix - Matrix3 leftK() const { - Matrix3 K; - K << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1; - return K; - } + Matrix3 leftK() const; /// Return the right calibration matrix - Matrix3 rightK() const { - Matrix3 K; - K << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1; - return K; - } + Matrix3 rightK() const; /// Return the fundamental matrix representation - Matrix3 matrix() const override { - return leftK().transpose().inverse() * E_.matrix() * rightK().inverse(); - } + Matrix3 matrix() const override; + /// @name Testable /// @{ - /// Print the SimpleFundamentalMatrix - void print(const std::string& s = "") const { - std::cout << s << " E:\n" - << E_.matrix() << "\nfa: " << fa_ << "\nfb: " << fb_ - << "\nca: " << ca_.transpose() << "\ncb: " << cb_.transpose() - << std::endl; - } + void print(const std::string& s = "") const; /// Check equality within a tolerance - bool equals(const SimpleFundamentalMatrix& other, double tol = 1e-9) const { - return E_.equals(other.E_, tol) && std::abs(fa_ - other.fa_) < tol && - std::abs(fb_ - other.fb_) < tol && (ca_ - other.ca_).norm() < tol && - (cb_ - other.cb_).norm() < tol; - } + bool equals(const SimpleFundamentalMatrix& other, double tol = 1e-9) const; /// @} /// @name Manifold @@ -284,23 +202,11 @@ class SimpleFundamentalMatrix : FundamentalMatrix { inline static size_t Dim() { return dimension; } inline size_t dim() const { return dimension; } - /// Return local coordinates with respect to another - /// SimpleFundamentalMatrix - Vector localCoordinates(const SimpleFundamentalMatrix& F) const { - Vector result(7); - result.head<5>() = E_.localCoordinates(F.E_); - result(5) = F.fa_ - fa_; // Difference in fa - result(6) = F.fb_ - fb_; // Difference in fb - return result; - } + /// Return local coordinates with respect to another SimpleFundamentalMatrix + Vector localCoordinates(const SimpleFundamentalMatrix& F) const; /// Retract the given vector to get a new SimpleFundamentalMatrix - SimpleFundamentalMatrix retract(const Vector& delta) const { - EssentialMatrix newE = E_.retract(delta.head<5>()); - double newFa = fa_ + delta(5); // Update fa - double newFb = fb_ + delta(6); // Update fb - return SimpleFundamentalMatrix(newE, newFa, newFb, ca_, cb_); - } + SimpleFundamentalMatrix retract(const Vector& delta) const; /// @} }; From 0431299df6ebd50c92b3440f468cced2b0435ea4 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Wed, 23 Oct 2024 23:59:32 -0700 Subject: [PATCH 173/204] Move transfer to cpp --- gtsam/geometry/FundamentalMatrix.cpp | 20 ++++++++++++++++++++ gtsam/geometry/FundamentalMatrix.h | 14 +------------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp index c25082fc0..e56d9feb3 100644 --- a/gtsam/geometry/FundamentalMatrix.cpp +++ b/gtsam/geometry/FundamentalMatrix.cpp @@ -9,6 +9,23 @@ namespace gtsam { +//************************************************************************* +Point2 FundamentalMatrix::transfer(const Matrix3& F01, const Point2& p1, + const Matrix3& F02, const Point2& p2) { + // Create lines in camera 0 from projections of the other two cameras + Vector3 line1 = F01 
* Vector3(p1.x(), p1.y(), 1); + Vector3 line2 = F02 * Vector3(p2.x(), p2.y(), 1); + + // Cross the lines to find the intersection point + Vector3 intersectionPoint = line1.cross(line2); + + // Normalize the intersection point + intersectionPoint /= intersectionPoint(2); + + return intersectionPoint.head<2>(); // Return the 2D point +} + +//************************************************************************* GeneralFundamentalMatrix::GeneralFundamentalMatrix(const Matrix3& F) { // Perform SVD Eigen::JacobiSVD svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV); @@ -83,6 +100,7 @@ GeneralFundamentalMatrix GeneralFundamentalMatrix::retract( return GeneralFundamentalMatrix(newU, newS, newV); } +//************************************************************************* Matrix3 SimpleFundamentalMatrix::leftK() const { Matrix3 K; K << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1; @@ -130,4 +148,6 @@ SimpleFundamentalMatrix SimpleFundamentalMatrix::retract( return SimpleFundamentalMatrix(newE, newFa, newFb, ca_, cb_); } +//************************************************************************* + } // namespace gtsam diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h index 322d47193..83277d27e 100644 --- a/gtsam/geometry/FundamentalMatrix.h +++ b/gtsam/geometry/FundamentalMatrix.h @@ -42,19 +42,7 @@ class FundamentalMatrix { * returns the 2D point in camera 0 where the epipolar lines intersect. */ static Point2 transfer(const Matrix3& F01, const Point2& p1, - const Matrix3& F02, const Point2& p2) { - // Create lines in camera 0 from projections of the other two cameras - Vector3 line1 = F01 * Vector3(p1.x(), p1.y(), 1); - Vector3 line2 = F02 * Vector3(p2.x(), p2.y(), 1); - - // Cross the lines to find the intersection point - Vector3 intersectionPoint = line1.cross(line2); - - // Normalize the intersection point - intersectionPoint /= intersectionPoint(2); - - return intersectionPoint.head<2>(); // Return the 2D point - } + const Matrix3& F02, const Point2& p2); }; /// Represents a set of three fundamental matrices for transferring points From dc23c02be80a744a590242809cc1438010a35d83 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 00:41:17 -0700 Subject: [PATCH 174/204] Fix transfer2, add export --- gtsam/geometry/FundamentalMatrix.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h index 83277d27e..bf89a4bb8 100644 --- a/gtsam/geometry/FundamentalMatrix.h +++ b/gtsam/geometry/FundamentalMatrix.h @@ -21,7 +21,7 @@ namespace gtsam { * classes. The `matrix()` function returns a 3x3 matrix representation of the * fundamental matrix. */ -class FundamentalMatrix { +class GTSAM_EXPORT FundamentalMatrix { public: /** * @brief Returns a 3x3 matrix representation of the fundamental matrix @@ -65,7 +65,7 @@ struct TripleF { /// Transfers a point from camera 0,1 to camera 2. Point2 transfer2(const Point2& p0, const Point2& p1) { - return FundamentalMatrix::transfer(F01.matrix(), p0, + return FundamentalMatrix::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1); } }; @@ -78,7 +78,7 @@ struct TripleF { * that describes the relationship between two images. It is parameterized by a * left rotation U, a scalar s, and a right rotation V. 
 */
-class GeneralFundamentalMatrix : public FundamentalMatrix {
+class GTSAM_EXPORT GeneralFundamentalMatrix : public FundamentalMatrix {
  private:
   Rot3 U_;    ///< Left rotation
   double s_;  ///< Scalar parameter for S
@@ -146,7 +146,7 @@ class GeneralFundamentalMatrix : public FundamentalMatrix {
  * parameterization of the essential matrix and focal lengths for left and right
  * cameras. Principal points are not part of the manifold but a convenience.
  */
-class SimpleFundamentalMatrix : public FundamentalMatrix {
+class GTSAM_EXPORT SimpleFundamentalMatrix : public FundamentalMatrix {
  private:
   EssentialMatrix E_;  ///< Essential matrix
   double fa_;          ///< Focal length for left camera

From 6b7222ea992e82765249590a6dbfd81a7cd2ba9d Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Thu, 24 Oct 2024 17:42:29 -0400
Subject: [PATCH 175/204] remove reference for shared_ptr

---
 gtsam/linear/PCGSolver.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index 17cc2d3db..42bb3fccc 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -42,7 +42,7 @@ struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters {
   PCGSolverParameters() {}
 
   PCGSolverParameters(
-      const std::shared_ptr<PreconditionerParameters> &preconditioner)
+      const std::shared_ptr<PreconditionerParameters> preconditioner)
       : preconditioner(preconditioner) {}
 
   void print(std::ostream &os) const override;

From 2258c6431ec8f2369c5fc600a7f729719028568e Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Thu, 24 Oct 2024 17:42:50 -0400
Subject: [PATCH 176/204] fix wrapping for matlab

---
 gtsam/linear/linear.i | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i
index eecc0192c..6b454af05 100644
--- a/gtsam/linear/linear.i
+++ b/gtsam/linear/linear.i
@@ -733,10 +733,10 @@ virtual class BlockJacobiPreconditionerParameters : gtsam::PreconditionerParamet
 #include
 virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters {
   PCGSolverParameters();
-  PCGSolverParameters(gtsam::PreconditionerParameters* preconditioner);
+  PCGSolverParameters(const gtsam::PreconditionerParameters* preconditioner);
   void print(string s = "");
-
-  gtsam::PreconditionerParameters* preconditioner;
+
+  std::shared_ptr<gtsam::PreconditionerParameters> preconditioner;
};

#include

From 328ee4d150aa235937a28d92f1dbf178b90d0b13 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Thu, 24 Oct 2024 17:43:00 -0400
Subject: [PATCH 177/204] update issue template

---
 .github/ISSUE_TEMPLATE/bug-report.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
index 9f15b2b7c..d7a35b124 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -3,12 +3,12 @@ name: "Bug Report"
 about: Submit a bug report to help us improve GTSAM
 
 ---
+
+
 
-
-
 ## Description

From 23d28b64dc8f0f4c66022fff06adc2c6a433c6a1 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Thu, 24 Oct 2024 17:43:08 -0400
Subject: [PATCH 178/204] make executable

---
 containers/hub_push.sh | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 mode change 100644 => 100755 containers/hub_push.sh

diff --git a/containers/hub_push.sh b/containers/hub_push.sh
old mode 100644
new mode 100755

From 5b413b84c95f34313f3331f04db6881f97fd5732 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 15:13:45 -0700
Subject: [PATCH 179/204] Remove inheritance, use abc language

---
 gtsam/geometry/FundamentalMatrix.cpp          | 13 ++-
 gtsam/geometry/FundamentalMatrix.h            | 96 +++++--------------
 .../geometry/tests/testFundamentalMatrix.cpp  | 13 ++-
 3 files changed, 47 insertions(+), 75 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp
index e56d9feb3..b16a3fef3 100644
--- a/gtsam/geometry/FundamentalMatrix.cpp
+++ b/gtsam/geometry/FundamentalMatrix.cpp
@@ -10,21 +10,20 @@
 namespace gtsam {
 
 //*************************************************************************
-Point2 FundamentalMatrix::transfer(const Matrix3& F01, const Point2& p1,
-                                   const Matrix3& F02, const Point2& p2) {
-  // Create lines in camera 0 from projections of the other two cameras
-  Vector3 line1 = F01 * Vector3(p1.x(), p1.y(), 1);
-  Vector3 line2 = F02 * Vector3(p2.x(), p2.y(), 1);
+Point2 Transfer(const Matrix3& Fca, const Point2& pa,  //
+                const Matrix3& Fcb, const Point2& pb) {
+  // Create lines in camera a from projections of the other two cameras
+  Vector3 line_a = Fca * Vector3(pa.x(), pa.y(), 1);
+  Vector3 line_b = Fcb * Vector3(pb.x(), pb.y(), 1);
 
   // Cross the lines to find the intersection point
-  Vector3 intersectionPoint = line1.cross(line2);
+  Vector3 intersectionPoint = line_a.cross(line_b);
 
   // Normalize the intersection point
   intersectionPoint /= intersectionPoint(2);
 
   return intersectionPoint.head<2>();  // Return the 2D point
 }
-
 //*************************************************************************
 GeneralFundamentalMatrix::GeneralFundamentalMatrix(const Matrix3& F) {
   // Perform SVD
diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index bf89a4bb8..9eba076a8 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -13,63 +13,6 @@
 namespace gtsam {
 
-/**
- * @brief Abstract base class for FundamentalMatrix
- *
- * This class provides a common interface for all types of fundamental matrices.
- * It declares a virtual function `matrix()` that must be implemented by derived
- * classes. The `matrix()` function returns a 3x3 matrix representation of the
- * fundamental matrix.
- */
-class GTSAM_EXPORT FundamentalMatrix {
- public:
-  /**
-   * @brief Returns a 3x3 matrix representation of the fundamental matrix
-   *
-   * @return A 3x3 matrix representing the fundamental matrix
-   */
-  virtual Matrix3 matrix() const = 0;
-
-  /**
-   * @brief Virtual destructor to ensure proper cleanup of derived classes
-   */
-  virtual ~FundamentalMatrix() {}
-
-  /**
-   * @brief Transfer projections from cameras 1 and 2 to camera 0
-   *
-   * Take two fundamental matrices F01 and F02, and two points p1 and p2, and
-   * returns the 2D point in camera 0 where the epipolar lines intersect.
-   */
-  static Point2 transfer(const Matrix3& F01, const Point2& p1,
-                         const Matrix3& F02, const Point2& p2);
-};
-
-/// Represents a set of three fundamental matrices for transferring points
-/// between three cameras.
-template <typename F>
-struct TripleF {
-  F F01, F12, F20;
-
-  /// Transfers a point from cameras 1,2 to camera 0.
-  Point2 transfer0(const Point2& p1, const Point2& p2) {
-    return FundamentalMatrix::transfer(F01.matrix(), p1,
-                                       F20.matrix().transpose(), p2);
-  }
-
-  /// Transfers a point from camera 0,2 to camera 1.
-  Point2 transfer1(const Point2& p0, const Point2& p2) {
-    return FundamentalMatrix::transfer(F01.matrix().transpose(), p0,
-                                       F12.matrix(), p2);
-  }
-
-  /// Transfers a point from camera 0,1 to camera 2.
-  Point2 transfer2(const Point2& p0, const Point2& p1) {
-    return FundamentalMatrix::transfer(F20.matrix(), p0,
-                                       F12.matrix().transpose(), p1);
-  }
-};
-
 /**
  * @class GeneralFundamentalMatrix
  * @brief Represents a general fundamental matrix.
@@ -78,7 +21,7 @@ struct TripleF {
  * that describes the relationship between two images. It is parameterized by a
  * left rotation U, a scalar s, and a right rotation V.
  */
-class GTSAM_EXPORT GeneralFundamentalMatrix : public FundamentalMatrix {
+class GTSAM_EXPORT GeneralFundamentalMatrix {
  private:
   Rot3 U_;    ///< Left rotation
   double s_;  ///< Scalar parameter for S
@@ -112,7 +55,7 @@ class GTSAM_EXPORT GeneralFundamentalMatrix : public FundamentalMatrix {
   GeneralFundamentalMatrix(const Matrix3& F);
 
   /// Return the fundamental matrix representation
-  Matrix3 matrix() const override;
+  Matrix3 matrix() const;
 
   /// @name Testable
   /// @{
@@ -146,7 +89,7 @@ class GeneralFundamentalMatrix {
  * parameterization of the essential matrix and focal lengths for left and right
  * cameras. Principal points are not part of the manifold but a convenience.
  */
-class GTSAM_EXPORT SimpleFundamentalMatrix : public FundamentalMatrix {
+class GTSAM_EXPORT SimpleFundamentalMatrix {
  private:
   EssentialMatrix E_;  ///< Essential matrix
   double fa_;          ///< Focal length for left camera
@@ -173,7 +116,7 @@ class GTSAM_EXPORT SimpleFundamentalMatrix {
   Matrix3 rightK() const;
 
   /// Return the fundamental matrix representation
-  Matrix3 matrix() const override;
+  Matrix3 matrix() const;
 
   /// @name Testable
   /// @{
@@ -198,6 +141,37 @@ class GTSAM_EXPORT SimpleFundamentalMatrix {
   /// @}
 };
 
+/**
+ * @brief Transfer projections from cameras a and b to camera c
+ *
+ * Take two fundamental matrices Fca and Fcb, and two points pa and pb, and
+ * returns the 2D point in view (c) where the epipolar lines intersect.
+ */
+Point2 Transfer(const Matrix3& Fca, const Point2& pa, const Matrix3& Fcb,
+                const Point2& pb);
+
+/// Represents a set of three fundamental matrices for transferring points
+/// between three cameras.
+template <typename F>
+struct TripleF {
+  F Fab, Fbc, Fca;
+
+  /// Transfers a point from cameras b,c to camera a.
+  Point2 transferToA(const Point2& pb, const Point2& pc) {
+    return Transfer(Fab.matrix(), pb, Fca.matrix().transpose(), pc);
+  }
+
+  /// Transfers a point from camera a,c to camera b.
+  Point2 transferToB(const Point2& pa, const Point2& pc) {
+    return Transfer(Fab.matrix().transpose(), pa, Fbc.matrix(), pc);
+  }
+
+  /// Transfers a point from cameras a,b to camera c.
+  Point2 transferToC(const Point2& pa, const Point2& pb) {
+    return Transfer(Fca.matrix(), pa, Fbc.matrix().transpose(), pb);
+  }
+};
+
 template <>
-struct traits<GeneralFundamentalMatrix>
-    : public internal::Manifold<GeneralFundamentalMatrix> {};
+struct traits<GeneralFundamentalMatrix>
+    : public internal::Manifold<GeneralFundamentalMatrix> {};
 
 template <>
 struct traits<SimpleFundamentalMatrix>
diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index 760b3b99c..028db2c1f 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -7,7 +7,6 @@
 
 #include
 #include
-#include
 #include
 #include
 #include
@@ -205,9 +204,9 @@ TEST(TripleF, Transfer) {
   auto triplet = generateTripleF(cameraPoses);
 
   // Check that they are all equal
-  EXPECT(triplet.F01.equals(triplet.F12, 1e-9));
-  EXPECT(triplet.F12.equals(triplet.F20, 1e-9));
-  EXPECT(triplet.F20.equals(triplet.F01, 1e-9));
+  EXPECT(triplet.Fab.equals(triplet.Fbc, 1e-9));
+  EXPECT(triplet.Fbc.equals(triplet.Fca, 1e-9));
+  EXPECT(triplet.Fca.equals(triplet.Fab, 1e-9));
 
   // Now project a point into the three cameras
   const Point3 P(0.1, 0.2, 0.3);
@@ -222,9 +221,9 @@ TEST(TripleF, Transfer) {
     p[i] = camera.project(P);
   }
 
   // Check that transfer works
-  EXPECT(assert_equal(p[0], triplet.transfer0(p[1], p[2]), 1e-9));
-  EXPECT(assert_equal(p[1], triplet.transfer1(p[0], p[2]), 1e-9));
-  EXPECT(assert_equal(p[2], triplet.transfer2(p[0], p[1]), 1e-9));
+  EXPECT(assert_equal(p[0], triplet.transferToA(p[1], p[2]), 1e-9));
+  EXPECT(assert_equal(p[1], triplet.transferToB(p[0], p[2]), 1e-9));
+  EXPECT(assert_equal(p[2], triplet.transferToC(p[0], p[1]), 1e-9));
 }
 
 //*************************************************************************

From 9773fbc6294c2c2d8e9026668858b10311333603 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 17:05:31 -0700
Subject: [PATCH 180/204] Add missing export

---
 gtsam/geometry/FundamentalMatrix.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index 9eba076a8..ff7f4c837 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -147,8 +147,8 @@ class GTSAM_EXPORT SimpleFundamentalMatrix {
  * Take two fundamental matrices Fca and Fcb, and two points pa and pb, and
  * returns the 2D point in view (c) where the epipolar lines intersect.
  */
-Point2 Transfer(const Matrix3& Fca, const Point2& pa, const Matrix3& Fcb,
-                const Point2& pb);
+GTSAM_EXPORT Point2 Transfer(const Matrix3& Fca, const Point2& pa,
+                             const Matrix3& Fcb, const Point2& pb);
 
 /// Represents a set of three fundamental matrices for transferring points
 /// between three cameras.
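[Editor's note between patches] The index convention that makes the transposes
in TripleF work: in this series, Fab.matrix() * (pb, 1) is the epipolar line of
pb in view a, so going "backwards" from c to a reuses Fca transposed. A minimal
usage sketch under that assumption (the function name is illustrative only):

  #include <gtsam/geometry/FundamentalMatrix.h>

  gtsam::Point2 predictInA(const gtsam::SimpleFundamentalMatrix& Fab,
                           const gtsam::SimpleFundamentalMatrix& Fca,
                           const gtsam::Point2& pb, const gtsam::Point2& pc) {
    // Fab maps points in b to epipolar lines in a.
    gtsam::Vector3 lb = Fab.matrix() * gtsam::Vector3(pb.x(), pb.y(), 1);
    // Fca maps points in a to lines in c, so its transpose maps c back to a.
    gtsam::Vector3 lc =
        Fca.matrix().transpose() * gtsam::Vector3(pc.x(), pc.y(), 1);
    gtsam::Vector3 p = lb.cross(lc);  // homogeneous intersection of the lines
    return gtsam::Point2(p.head<2>() / p(2));  // same math as Transfer() above
  }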
From 502dcc480bff63ec1bd8bb561c7e65b021c1a800 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 17:30:30 -0700
Subject: [PATCH 181/204] Just FundamentalMatrix

---
 gtsam/geometry/FundamentalMatrix.cpp          | 18 +++++------
 gtsam/geometry/FundamentalMatrix.h            | 32 +++++++++----------
 .../geometry/tests/testFundamentalMatrix.cpp  | 24 +++++++-------
 3 files changed, 36 insertions(+), 38 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp
index b16a3fef3..71ca47418 100644
--- a/gtsam/geometry/FundamentalMatrix.cpp
+++ b/gtsam/geometry/FundamentalMatrix.cpp
@@ -25,7 +25,7 @@ Point2 Transfer(const Matrix3& Fca, const Point2& pa,  //
   return intersectionPoint.head<2>();  // Return the 2D point
 }
 //*************************************************************************
-GeneralFundamentalMatrix::GeneralFundamentalMatrix(const Matrix3& F) {
+FundamentalMatrix::FundamentalMatrix(const Matrix3& F) {
   // Perform SVD
   Eigen::JacobiSVD<Matrix3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
@@ -66,24 +66,23 @@ GeneralFundamentalMatrix::GeneralFundamentalMatrix(const Matrix3& F) {
   V_ = Rot3(V);
 }
 
-Matrix3 GeneralFundamentalMatrix::matrix() const {
+Matrix3 FundamentalMatrix::matrix() const {
   return U_.matrix() * Vector3(1, s_, 0).asDiagonal() * V_.transpose().matrix();
 }
 
-void GeneralFundamentalMatrix::print(const std::string& s) const {
+void FundamentalMatrix::print(const std::string& s) const {
   std::cout << s << "U:\n"
             << U_.matrix() << "\ns: " << s_ << "\nV:\n"
             << V_.matrix() << std::endl;
 }
 
-bool GeneralFundamentalMatrix::equals(const GeneralFundamentalMatrix& other,
-                                      double tol) const {
+bool FundamentalMatrix::equals(const FundamentalMatrix& other,
+                               double tol) const {
   return U_.equals(other.U_, tol) && std::abs(s_ - other.s_) < tol &&
          V_.equals(other.V_, tol);
 }
 
-Vector GeneralFundamentalMatrix::localCoordinates(
-    const GeneralFundamentalMatrix& F) const {
+Vector FundamentalMatrix::localCoordinates(const FundamentalMatrix& F) const {
   Vector result(7);
   result.head<3>() = U_.localCoordinates(F.U_);
   result(3) = F.s_ - s_;  // Difference in scalar
   result.tail<3>() = V_.localCoordinates(F.V_);
   return result;
 }
 
-GeneralFundamentalMatrix GeneralFundamentalMatrix::retract(
-    const Vector& delta) const {
+FundamentalMatrix FundamentalMatrix::retract(const Vector& delta) const {
   Rot3 newU = U_.retract(delta.head<3>());
   double newS = s_ + delta(3);  // Update scalar
   Rot3 newV = V_.retract(delta.tail<3>());
-  return GeneralFundamentalMatrix(newU, newS, newV);
+  return FundamentalMatrix(newU, newS, newV);
 }
 
 //*************************************************************************
diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index ff7f4c837..718364ca0 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -14,14 +14,14 @@
 namespace gtsam {
 
 /**
- * @class GeneralFundamentalMatrix
+ * @class FundamentalMatrix
  * @brief Represents a general fundamental matrix.
 *
 * This class represents a general fundamental matrix, which is a 3x3 matrix
 * that describes the relationship between two images. It is parameterized by a
 * left rotation U, a scalar s, and a right rotation V.
 */
-class GTSAM_EXPORT GeneralFundamentalMatrix {
+class GTSAM_EXPORT FundamentalMatrix {
  private:
   Rot3 U_;    ///< Left rotation
   double s_;  ///< Scalar parameter for S
@@ -29,42 +29,42 @@ class GTSAM_EXPORT GeneralFundamentalMatrix {
 
 public:
   /// Default constructor
-  GeneralFundamentalMatrix() : U_(Rot3()), s_(1.0), V_(Rot3()) {}
+  FundamentalMatrix() : U_(Rot3()), s_(1.0), V_(Rot3()) {}
 
   /**
    * @brief Construct from U, V, and scalar s
    *
-   * Initializes the GeneralFundamentalMatrix with the given left rotation U,
+   * Initializes the FundamentalMatrix with the given left rotation U,
    * scalar s, and right rotation V.
    *
    * @param U Left rotation matrix
    * @param s Scalar parameter for the fundamental matrix
    * @param V Right rotation matrix
    */
-  GeneralFundamentalMatrix(const Rot3& U, double s, const Rot3& V)
+  FundamentalMatrix(const Rot3& U, double s, const Rot3& V)
       : U_(U), s_(s), V_(V) {}
 
   /**
    * @brief Construct from a 3x3 matrix using SVD
    *
-   * Initializes the GeneralFundamentalMatrix by performing SVD on the given
+   * Initializes the FundamentalMatrix by performing SVD on the given
    * matrix and ensuring U and V are not reflections.
    *
    * @param F A 3x3 matrix representing the fundamental matrix
    */
-  GeneralFundamentalMatrix(const Matrix3& F);
+  FundamentalMatrix(const Matrix3& F);
 
   /// Return the fundamental matrix representation
   Matrix3 matrix() const;
 
   /// @name Testable
   /// @{
-  /// Print the GeneralFundamentalMatrix
+  /// Print the FundamentalMatrix
   void print(const std::string& s = "") const;
 
-  /// Check if the GeneralFundamentalMatrix is equal to another within a
+  /// Check if the FundamentalMatrix is equal to another within a
   /// tolerance
-  bool equals(const GeneralFundamentalMatrix& other, double tol = 1e-9) const;
+  bool equals(const FundamentalMatrix& other, double tol = 1e-9) const;
 
   /// @}
   /// @name Manifold
@@ -73,11 +73,11 @@ class GTSAM_EXPORT FundamentalMatrix {
   inline static size_t Dim() { return dimension; }
   inline size_t dim() const { return dimension; }
 
-  /// Return local coordinates with respect to another GeneralFundamentalMatrix
-  Vector localCoordinates(const GeneralFundamentalMatrix& F) const;
+  /// Return local coordinates with respect to another FundamentalMatrix
+  Vector localCoordinates(const FundamentalMatrix& F) const;
 
-  /// Retract the given vector to get a new GeneralFundamentalMatrix
-  GeneralFundamentalMatrix retract(const Vector& delta) const;
+  /// Retract the given vector to get a new FundamentalMatrix
+  FundamentalMatrix retract(const Vector& delta) const;
   /// @}
 };
 
@@ -173,8 +173,8 @@ struct TripleF {
 };
 
 template <>
-struct traits<GeneralFundamentalMatrix>
-    : public internal::Manifold<GeneralFundamentalMatrix> {};
+struct traits<FundamentalMatrix>
+    : public internal::Manifold<FundamentalMatrix> {};
 
 template <>
 struct traits<SimpleFundamentalMatrix>
diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index 028db2c1f..a8884e791 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -16,34 +16,34 @@ using namespace std::placeholders;
 using namespace std;
 using namespace gtsam;
 
-GTSAM_CONCEPT_TESTABLE_INST(GeneralFundamentalMatrix)
-GTSAM_CONCEPT_MANIFOLD_INST(GeneralFundamentalMatrix)
+GTSAM_CONCEPT_TESTABLE_INST(FundamentalMatrix)
+GTSAM_CONCEPT_MANIFOLD_INST(FundamentalMatrix)
 
 //*************************************************************************
 // Create two rotations and corresponding fundamental matrix F
 Rot3 trueU = Rot3::Yaw(M_PI_2);
 Rot3 trueV = Rot3::Yaw(M_PI_4);
 double trueS = 0.5;
-GeneralFundamentalMatrix trueF(trueU, trueS,
trueV); +FundamentalMatrix trueF(trueU, trueS, trueV); //************************************************************************* -TEST(GeneralFundamentalMatrix, localCoordinates) { +TEST(FundamentalMatrix, localCoordinates) { Vector expected = Z_7x1; // Assuming 7 dimensions for U, V, and s Vector actual = trueF.localCoordinates(trueF); EXPECT(assert_equal(expected, actual, 1e-8)); } //************************************************************************* -TEST(GeneralFundamentalMatrix, retract) { - GeneralFundamentalMatrix actual = trueF.retract(Z_7x1); +TEST(FundamentalMatrix, retract) { + FundamentalMatrix actual = trueF.retract(Z_7x1); EXPECT(assert_equal(trueF, actual)); } //************************************************************************* -TEST(GeneralFundamentalMatrix, RoundTrip) { +TEST(FundamentalMatrix, RoundTrip) { Vector7 d; d << 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7; - GeneralFundamentalMatrix hx = trueF.retract(d); + FundamentalMatrix hx = trueF.retract(d); Vector actual = trueF.localCoordinates(hx); EXPECT(assert_equal(d, actual, 1e-8)); } @@ -56,7 +56,7 @@ SimpleFundamentalMatrix stereoF(defaultE, 1.0, 1.0, zero, zero); //************************************************************************* TEST(SimpleStereo, Conversion) { - GeneralFundamentalMatrix convertedF(stereoF.matrix()); + FundamentalMatrix convertedF(stereoF.matrix()); EXPECT(assert_equal(stereoF.matrix(), convertedF.matrix(), 1e-8)); } @@ -103,7 +103,7 @@ SimpleFundamentalMatrix pixelStereo(defaultE, focalLength, focalLength, zero, TEST(PixelStereo, Conversion) { auto expected = pixelStereo.matrix(); - GeneralFundamentalMatrix convertedF(pixelStereo.matrix()); + FundamentalMatrix convertedF(pixelStereo.matrix()); // Check equality of F-matrices up to a scale auto actual = convertedF.matrix(); @@ -132,7 +132,7 @@ SimpleFundamentalMatrix rotatedPixelStereo(rotatedE, focalLength, focalLength, TEST(RotatedPixelStereo, Conversion) { auto expected = rotatedPixelStereo.matrix(); - GeneralFundamentalMatrix convertedF(rotatedPixelStereo.matrix()); + FundamentalMatrix convertedF(rotatedPixelStereo.matrix()); // Check equality of F-matrices up to a scale auto actual = convertedF.matrix(); @@ -161,7 +161,7 @@ SimpleFundamentalMatrix stereoWithPrincipalPoints(rotatedE, focalLength, TEST(stereoWithPrincipalPoints, Conversion) { auto expected = stereoWithPrincipalPoints.matrix(); - GeneralFundamentalMatrix convertedF(stereoWithPrincipalPoints.matrix()); + FundamentalMatrix convertedF(stereoWithPrincipalPoints.matrix()); // Check equality of F-matrices up to a scale auto actual = convertedF.matrix(); From 90ae543c7f3a4091b8c6cb9717afc65f5aa5af15 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Fri, 25 Oct 2024 07:33:24 -0700 Subject: [PATCH 182/204] Address review Comments --- gtsam/geometry/FundamentalMatrix.cpp | 5 +++-- gtsam/geometry/FundamentalMatrix.h | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp index 71ca47418..57d59d0a0 100644 --- a/gtsam/geometry/FundamentalMatrix.cpp +++ b/gtsam/geometry/FundamentalMatrix.cpp @@ -10,8 +10,8 @@ namespace gtsam { //************************************************************************* -Point2 Transfer(const Matrix3& Fca, const Point2& pa, // - const Matrix3& Fcb, const Point2& pb) { +Point2 EpipolarTransfer(const Matrix3& Fca, const Point2& pa, // + const Matrix3& Fcb, const Point2& pb) { // Create lines in camera a from projections of the other two cameras 
  Vector3 line_a = Fca * Vector3(pa.x(), pa.y(), 1);
   Vector3 line_b = Fcb * Vector3(pb.x(), pb.y(), 1);
 
@@ -24,6 +24,7 @@ Point2 Transfer(const Matrix3& Fca, const Point2& pa,  //
 
   return intersectionPoint.head<2>();  // Return the 2D point
 }
+
 //*************************************************************************
 FundamentalMatrix::FundamentalMatrix(const Matrix3& F) {
   // Perform SVD
diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index 718364ca0..497f95cf7 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -147,8 +147,8 @@ class GTSAM_EXPORT SimpleFundamentalMatrix {
  * Take two fundamental matrices Fca and Fcb, and two points pa and pb, and
  * returns the 2D point in view (c) where the epipolar lines intersect.
  */
-GTSAM_EXPORT Point2 Transfer(const Matrix3& Fca, const Point2& pa,
-                             const Matrix3& Fcb, const Point2& pb);
+GTSAM_EXPORT Point2 EpipolarTransfer(const Matrix3& Fca, const Point2& pa,
+                                     const Matrix3& Fcb, const Point2& pb);
 
 /// Represents a set of three fundamental matrices for transferring points
 /// between three cameras.
@@ -158,17 +158,17 @@ struct TripleF {
 
   /// Transfers a point from cameras b,c to camera a.
   Point2 transferToA(const Point2& pb, const Point2& pc) {
-    return Transfer(Fab.matrix(), pb, Fca.matrix().transpose(), pc);
+    return EpipolarTransfer(Fab.matrix(), pb, Fca.matrix().transpose(), pc);
   }
 
   /// Transfers a point from camera a,c to camera b.
   Point2 transferToB(const Point2& pa, const Point2& pc) {
-    return Transfer(Fab.matrix().transpose(), pa, Fbc.matrix(), pc);
+    return EpipolarTransfer(Fab.matrix().transpose(), pa, Fbc.matrix(), pc);
  }
 
   /// Transfers a point from cameras a,b to camera c.
   Point2 transferToC(const Point2& pa, const Point2& pb) {
-    return Transfer(Fca.matrix(), pa, Fbc.matrix().transpose(), pb);
+    return EpipolarTransfer(Fca.matrix(), pa, Fbc.matrix().transpose(), pb);
   }
 };

From 8a0ab6e094ed3be2387f44610b7fca7a4adbf236 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 01:09:01 -0700
Subject: [PATCH 183/204] proto-factor with numerical derivative

---
 .../geometry/tests/testFundamentalMatrix.cpp  | 29 +++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp
index a8884e791..80e6b1d3a 100644
--- a/gtsam/geometry/tests/testFundamentalMatrix.cpp
+++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp
@@ -197,6 +197,35 @@ TripleF<SimpleFundamentalMatrix> generateTripleF(
   return {F[0], F[1], F[2]};  // Return a TripleF instance
 }
 
+//*************************************************************************
+
+struct TripletFactor {
+  using F = FundamentalMatrix;
+  using SF = SimpleFundamentalMatrix;
+  Point2 p0, p1, p2;
+
+  /// vector of errors returns 6D vector
+  Vector evaluateError(const SF& F01, const SF& F12, const SF& F20,  //
+                       Matrix* H01, Matrix* H12, Matrix* H20) const {
+    Vector error(6);
+    std::function<Vector(const SF&, const SF&, const SF&)> fn =
+        [&](const SF& F01, const SF& F12, const SF& F20) {
+          Vector6 error;
+          error << F::transfer(F01.matrix(), p1, F20.matrix().transpose(), p2) - p0,
+              F::transfer(F01.matrix().transpose(), p0, F12.matrix(), p2) - p1,
+              F::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1) - p2;
+          return error;
+        };
+    if (H01)
+      *H01 = numericalDerivative31(fn, F01, F12, F20);
+    if (H12)
+      *H12 = numericalDerivative32(fn, F01, F12, F20);
+    if (H20)
+      *H20 = numericalDerivative33(fn, F01, F12, F20);
+    return fn(F01, F12, F20);
+  }
+};
+
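+// [Editor's note, not part of this commit] How the Jacobians above come out:
+// numericalDerivative31/32/33 differentiate a ternary function with respect
+// to its 1st/2nd/3rd argument by perturbing that argument through its
+// manifold retract (here the 7-dof SimpleFundamentalMatrix::retract) with
+// central differences, so with a 6-vector error each H is a 6x7 matrix.
+// The same pattern on a simpler function, assuming only
+// gtsam/base/numericalDerivative.h:
+//
+//   std::function<Vector3(const Rot3&, const Point3&, double)> h =
+//       [](const Rot3& R, const Point3& p, double s) {
+//         return Vector3(s * (R * p));
+//       };
+//   Matrix H1 = numericalDerivative31(h, Rot3(), Point3(1, 2, 3), 2.0);
+//   // H1 is 3x3: derivative of h w.r.t. the rotation, about the identity.
+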
//*************************************************************************
 TEST(TripleF, Transfer) {
   // Generate cameras on a circle, as well as fundamental matrices

From 00fc2ecc2ba88a1aaf65a234c7761ba018ffc77d Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 11:43:40 -0700
Subject: [PATCH 184/204] Slight modernization

---
 examples/SFMdata.h | 82 ++++++++++++++++++++++++++++------------------
 1 file changed, 50 insertions(+), 32 deletions(-)

diff --git a/examples/SFMdata.h b/examples/SFMdata.h
index 3031828f1..112bd927c 100644
--- a/examples/SFMdata.h
+++ b/examples/SFMdata.h
@@ -16,56 +16,74 @@
  */

 /**
- * A structure-from-motion example with landmarks, default function arguments give
+ * A structure-from-motion example with landmarks, default arguments give:
  * - The landmarks form a 10 meter cube
  * - The robot rotates around the landmarks, always facing towards the cube
- * Passing function argument allows to specificy an initial position, a pose increment and step count.
+ * Passing function arguments allows specifying an initial position, a pose
+ * increment, and step count.
  */

 #pragma once

-// As this is a full 3D problem, we will use Pose3 variables to represent the camera
-// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
-// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).
-// We will also need a camera object to hold calibration information and perform projections.
-#include
+// As this is a full 3D problem, we will use Pose3 variables to represent the
+// camera positions and Point3 variables (x, y, z) to represent the landmark
+// coordinates. Camera observations of landmarks (i.e. pixel coordinates) will
+// be stored as Point2 (x, y).
 #include
+#include

-// We will also need a camera object to hold calibration information and perform projections.
+// We will also need a camera object to hold calibration information and perform
+// projections.
#include +#include -/* ************************************************************************* */ -std::vector createPoints() { +namespace gtsam { - // Create the set of ground-truth landmarks - std::vector points; - points.push_back(gtsam::Point3(10.0,10.0,10.0)); - points.push_back(gtsam::Point3(-10.0,10.0,10.0)); - points.push_back(gtsam::Point3(-10.0,-10.0,10.0)); - points.push_back(gtsam::Point3(10.0,-10.0,10.0)); - points.push_back(gtsam::Point3(10.0,10.0,-10.0)); - points.push_back(gtsam::Point3(-10.0,10.0,-10.0)); - points.push_back(gtsam::Point3(-10.0,-10.0,-10.0)); - points.push_back(gtsam::Point3(10.0,-10.0,-10.0)); +/// Create a set of ground-truth landmarks +std::vector createPoints() { + std::vector points; + points.push_back(Point3(10.0, 10.0, 10.0)); + points.push_back(Point3(-10.0, 10.0, 10.0)); + points.push_back(Point3(-10.0, -10.0, 10.0)); + points.push_back(Point3(10.0, -10.0, 10.0)); + points.push_back(Point3(10.0, 10.0, -10.0)); + points.push_back(Point3(-10.0, 10.0, -10.0)); + points.push_back(Point3(-10.0, -10.0, -10.0)); + points.push_back(Point3(10.0, -10.0, -10.0)); return points; } -/* ************************************************************************* */ -std::vector createPoses( - const gtsam::Pose3& init = gtsam::Pose3(gtsam::Rot3::Ypr(M_PI/2,0,-M_PI/2), gtsam::Point3(30, 0, 0)), - const gtsam::Pose3& delta = gtsam::Pose3(gtsam::Rot3::Ypr(0,-M_PI/4,0), gtsam::Point3(sin(M_PI/4)*30, 0, 30*(1-sin(M_PI/4)))), - int steps = 8) { +/** + * Create a set of ground-truth poses + * Default values give a circular trajectory, radius 30 at pi/4 intervals, + * always facing the circle center + */ +std::vector createPoses( + const Pose3& init = Pose3(Rot3::Ypr(M_PI_2, 0, -M_PI_2), {30, 0, 0}), + const Pose3& delta = Pose3(Rot3::Ypr(0, -M_PI_4, 0), + {sin(M_PI_4) * 30, 0, 30 * (1 - sin(M_PI_4))}), + int steps = 8) { + std::vector poses; + poses.reserve(steps); - // Create the set of ground-truth poses - // Default values give a circular trajectory, radius 30 at pi/4 intervals, always facing the circle center - std::vector poses; - int i = 1; poses.push_back(init); - for(; i < steps; ++i) { - poses.push_back(poses[i-1].compose(delta)); + for (int i = 1; i < steps; ++i) { + poses.push_back(poses[i - 1].compose(delta)); } return poses; } + +/** + * Create regularly spaced poses with specified radius and number of cameras + */ +std::vector posesOnCircle(int num_cameras = 8, double radius = 30) { + const Pose3 init(Rot3::Ypr(M_PI_2, 0, -M_PI_2), {radius, 0, 0}); + const double theta = M_PI / num_cameras; + const Pose3 delta( + Rot3::Ypr(0, -2 * theta, 0), + {sin(2 * theta) * radius, 0, radius * (1 - sin(2 * theta))}); + return createPoses(init, delta, num_cameras); +} +} // namespace gtsam \ No newline at end of file From d44cca770d661375eabc11ae20e3d66a90a8e43c Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 11:44:21 -0700 Subject: [PATCH 185/204] TransferFactor in sfm --- .../geometry/tests/testFundamentalMatrix.cpp | 29 ------ gtsam/sfm/TransferFactor.h | 48 ++++++++++ gtsam/sfm/tests/testTransferFactor.cpp | 95 +++++++++++++++++++ 3 files changed, 143 insertions(+), 29 deletions(-) create mode 100644 gtsam/sfm/TransferFactor.h create mode 100644 gtsam/sfm/tests/testTransferFactor.cpp diff --git a/gtsam/geometry/tests/testFundamentalMatrix.cpp b/gtsam/geometry/tests/testFundamentalMatrix.cpp index 80e6b1d3a..a8884e791 100644 --- a/gtsam/geometry/tests/testFundamentalMatrix.cpp +++ b/gtsam/geometry/tests/testFundamentalMatrix.cpp @@ -197,35 +197,6 
@@ TripleF generateTripleF( return {F[0], F[1], F[2]}; // Return a TripleF instance } -//************************************************************************* - -struct TripletFactor { - using F = FundamentalMatrix; - using SF = SimpleFundamentalMatrix; - Point2 p0, p1, p2; - - /// vector of errors returns 6D vector - Vector evaluateError(const SF& F01, const SF& F12, const SF& F20, // - Matrix* H01, Matrix* H12, Matrix* H20) const { - Vector error(6); - std::function fn = [&](const SF& F01, const SF& F12, - const SF& F20) { - Vector6 error; - error << F::transfer(F01.matrix(), p1, F20.matrix().transpose(), p2) - p0, - F::transfer(F01.matrix().transpose(), p0, F12.matrix(), p2) - p1, - F::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1) - p2; - return error; - }; - if (H01) - *H01 = numericalDerivative31(fn, F01, F12, F20); - if (H12) - *H12 = numericalDerivative32(fn, F01, F12, F20); - if (H20) - *H20 = numericalDerivative33(fn, F01, F12, F20); - return fn(F01, F12, F20); - } -}; - //************************************************************************* TEST(TripleF, Transfer) { // Generate cameras on a circle, as well as fundamental matrices diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h new file mode 100644 index 000000000..43142e762 --- /dev/null +++ b/gtsam/sfm/TransferFactor.h @@ -0,0 +1,48 @@ +/* ---------------------------------------------------------------------------- + * GTSAM Copyright 2010-2024, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + * See LICENSE for the license information + * -------------------------------------------------------------------------- */ + +/* + * @file TransferFactor.h + * @brief TransferFactor class + * @author Frank Dellaert + * @date October 24, 2024 + */ + +#pragma once + +#include +#include +#include + +namespace gtsam { + +template +struct TransferFactor { + Point2 p0, p1, p2; + + /// vector of errors returns 6D vector + Vector evaluateError(const F& F01, const F& F12, const F& F20, // + Matrix* H01, Matrix* H12, Matrix* H20) const { + Vector error(6); + std::function fn = [&](const F& F01, const F& F12, + const F& F20) { + Vector6 error; + error << // + F::transfer(F01.matrix(), p1, F20.matrix().transpose(), p2) - p0, + F::transfer(F01.matrix().transpose(), p0, F12.matrix(), p2) - p1, + F::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1) - p2; + return error; + }; + if (H01) *H01 = numericalDerivative31(fn, F01, F12, F20); + if (H12) *H12 = numericalDerivative32(fn, F01, F12, F20); + if (H20) *H20 = numericalDerivative33(fn, F01, F12, F20); + return fn(F01, F12, F20); + } +}; + +} // namespace gtsam diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp new file mode 100644 index 000000000..3c3e89693 --- /dev/null +++ b/gtsam/sfm/tests/testTransferFactor.cpp @@ -0,0 +1,95 @@ +/* + * @file testTransferFactor.cpp + * @brief Test TransferFactor class + * @author Your Name + * @date October 23, 2024 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace gtsam; + +//************************************************************************* +/// Generate three cameras on a circle, looking in +std::array generateCameraPoses() { + std::array cameraPoses; + const double radius = 1.0; + for (int i = 0; i < 3; ++i) { + double angle = i * 2.0 * M_PI / 3.0; + double c 
= cos(angle), s = sin(angle); + Rot3 aRb({-s, c, 0}, {0, 0, -1}, {-c, -s, 0}); + cameraPoses[i] = {aRb, Point3(radius * c, radius * s, 0)}; + } + return cameraPoses; +} + +// Function to generate a TripleF from camera poses +TripleF generateTripleF( + const std::array& cameraPoses) { + std::array F; + for (size_t i = 0; i < 3; ++i) { + size_t j = (i + 1) % 3; + const Pose3 iPj = cameraPoses[i].between(cameraPoses[j]); + EssentialMatrix E(iPj.rotation(), Unit3(iPj.translation())); + F[i] = {E, 1000.0, 1000.0, Point2(640 / 2, 480 / 2), + Point2(640 / 2, 480 / 2)}; + } + return {F[0], F[1], F[2]}; // Return a TripleF instance +} + +double focalLength = 1000; +Point2 principalPoint(640 / 2, 480 / 2); + +// Test for TransferFactor +TEST(TransferFactor, Jacobians) { + // Generate cameras on a circle + std::array cameraPoses = generateCameraPoses(); + auto triplet = generateTripleF(cameraPoses); + + // Now project a point into the three cameras + const Point3 P(0.1, 0.2, 0.3); + const Cal3_S2 K(focalLength, focalLength, 0.0, // + principalPoint.x(), principalPoint.y()); + + std::array p; + for (size_t i = 0; i < 3; ++i) { + // Project the point into each camera + PinholeCameraCal3_S2 camera(cameraPoses[i], K); + p[i] = camera.project(P); + } + + // Create a TransferFactor + TransferFactor factor{p[0], p[1], p[2]}; + Matrix H01, H12, H20; + Vector error = factor.evaluateError(triplet.F01, triplet.F12, triplet.F20, + &H01, &H12, &H20); + std::cout << "Error: " << error << std::endl; + std::cout << H01 << std::endl << std::endl; + std::cout << H12 << std::endl << std::endl; + std::cout << H20 << std::endl; + + // Check Jacobians + Values values; + values.insert(0, triplet.F01); + values.insert(1, triplet.F12); + values.insert(2, triplet.F20); + // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-7); +} + +//************************************************************************* +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +//************************************************************************* From 34182bddda092b7db84899e7cf8cc7e497bfa64e Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 12:21:37 -0700 Subject: [PATCH 186/204] TripletError vs TransferFactor --- gtsam/sfm/TransferFactor.h | 22 ++++++++++++++++++++-- gtsam/sfm/tests/testTransferFactor.cpp | 17 ++++++++++++----- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h index 43142e762..cec7a445e 100644 --- a/gtsam/sfm/TransferFactor.h +++ b/gtsam/sfm/TransferFactor.h @@ -22,13 +22,12 @@ namespace gtsam { template -struct TransferFactor { +struct TripletError { Point2 p0, p1, p2; /// vector of errors returns 6D vector Vector evaluateError(const F& F01, const F& F12, const F& F20, // Matrix* H01, Matrix* H12, Matrix* H20) const { - Vector error(6); std::function fn = [&](const F& F01, const F& F12, const F& F20) { Vector6 error; @@ -45,4 +44,23 @@ struct TransferFactor { } }; +template +struct TransferFactor { + Point2 p0, p1, p2; + + /// vector of errors returns 2D vector + Vector evaluateError(const F& F12, const F& F20, // + Matrix* H12, Matrix* H20) const { + std::function fn = [&](const F& F12, const F& F20) { + Vector2 error; + error << // + F::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1) - p2; + return error; + }; + if (H12) *H12 = numericalDerivative21(fn, F12, F20); + if (H20) *H20 = numericalDerivative22(fn, F12, F20); + return fn(F12, F20); + } +}; + } // namespace gtsam diff --git 
a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp
index 3c3e89693..44b03ad5d 100644
--- a/gtsam/sfm/tests/testTransferFactor.cpp
+++ b/gtsam/sfm/tests/testTransferFactor.cpp
@@ -70,18 +70,25 @@ TEST(TransferFactor, Jacobians) {
   }

   // Create a TransferFactor
-  TransferFactor factor{p[0], p[1], p[2]};
+  TripletError error{p[0], p[1], p[2]};
   Matrix H01, H12, H20;
-  Vector error = factor.evaluateError(triplet.F01, triplet.F12, triplet.F20,
-                                      &H01, &H12, &H20);
-  std::cout << "Error: " << error << std::endl;
+  Vector e = error.evaluateError(triplet.F01, triplet.F12, triplet.F20, &H01,
+                                 &H12, &H20);
+  std::cout << "Error: " << e << std::endl;
   std::cout << H01 << std::endl << std::endl;
   std::cout << H12 << std::endl << std::endl;
   std::cout << H20 << std::endl;

+  // Create a TransferFactor
+  TransferFactor factor{p[0], p[1], p[2]};
+  Matrix H0, H1;
+  Vector e2 = factor.evaluateError(triplet.F12, triplet.F20, &H0, &H1);
+  std::cout << "Error: " << e2 << std::endl;
+  std::cout << H0 << std::endl << std::endl;
+  std::cout << H1 << std::endl << std::endl;
+
   // Check Jacobians
   Values values;
-  values.insert(0, triplet.F01);
   values.insert(1, triplet.F12);
   values.insert(2, triplet.F20);
   // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-7);

From 5f9eeb44157e231fecebb99bd0602dc6abc3792e Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 13:09:41 -0700
Subject: [PATCH 187/204] Clean up/format

---
 gtsam/inference/LabeledSymbol.h | 130 ++++++++++++++++++++------------
 1 file changed, 80 insertions(+), 50 deletions(-)

diff --git a/gtsam/inference/LabeledSymbol.h b/gtsam/inference/LabeledSymbol.h
index 5a7a98c1d..eb663ef9b 100644
--- a/gtsam/inference/LabeledSymbol.h
+++ b/gtsam/inference/LabeledSymbol.h
@@ -19,9 +19,11 @@

 #pragma once

-#include
+#include

 #include
+#include
+
 namespace gtsam {

 /**
@@ -33,113 +35,141 @@ namespace gtsam {
  * which allows expressing "Pose 7 from robot B" as "xB7".
  */
 class GTSAM_EXPORT LabeledSymbol {
-protected:
+ protected:
   unsigned char c_, label_;
   std::uint64_t j_;

-public:
-  /** Default constructor */
+ public:
+  /// @name Constructors
+  /// @{
+
+  /// Default constructor
   LabeledSymbol();

-  /** Copy constructor */
+  /// Copy constructor
   LabeledSymbol(const LabeledSymbol& key);

-  /** Constructor */
+  /// Constructor from characters c and label, and integer j
   LabeledSymbol(unsigned char c, unsigned char label, std::uint64_t j);

-  /** Constructor that decodes an integer gtsam::Key */
-  LabeledSymbol(gtsam::Key key);
+  /// Constructor that decodes an integer Key
+  LabeledSymbol(Key key);

-  /** Cast to integer */
-  operator gtsam::Key() const;
+  /// @}
+  /// @name Testable
+  /// @{

-  // Testable Requirements
+  /// Prints the LabeledSymbol with an optional prefix string.
   void print(const std::string& s = "") const;

+  /// Checks if this LabeledSymbol is equal to another, tolerance is ignored.
bool equals(const LabeledSymbol& expected, double tol = 0.0) const { return (*this) == expected; } - /** return the integer version */ - gtsam::Key key() const { return (gtsam::Key) *this; } + /// @} + /// @name API + /// @{ - /** Retrieve label character */ + /// Cast to Key + operator Key() const; + + /// return the integer version + Key key() const { return (Key) * this; } + + /// Retrieve label character inline unsigned char label() const { return label_; } - /** Retrieve key character */ + /// Retrieve key character inline unsigned char chr() const { return c_; } - /** Retrieve key index */ + /// Retrieve key index inline size_t index() const { return j_; } - /** Create a string from the key */ + /// Create a string from the key operator std::string() const; - /** Comparison for use in maps */ + /// Output stream operator that can be used with key_formatter (see Key.h). + friend GTSAM_EXPORT std::ostream& operator<<(std::ostream&, + const LabeledSymbol&); + + /// @} + /// @name Comparison + /// @{ + bool operator<(const LabeledSymbol& comp) const; bool operator==(const LabeledSymbol& comp) const; - bool operator==(gtsam::Key comp) const; + bool operator==(Key comp) const; bool operator!=(const LabeledSymbol& comp) const; - bool operator!=(gtsam::Key comp) const; + bool operator!=(Key comp) const; - /** Return a filter function that returns true when evaluated on a gtsam::Key whose - * character (when converted to a LabeledSymbol) matches \c c. Use this with the - * Values::filter() function to retrieve all key-value pairs with the + /** Return a filter function that returns true when evaluated on a Key whose + * character (when converted to a LabeledSymbol) matches \c c. Use this with + * the Values::filter() function to retrieve all key-value pairs with the * requested character. */ - // Checks only the type - static std::function TypeTest(unsigned char c); + /// @} + /// @name Advanced API + /// @{ - // Checks only the robot ID (label_) - static std::function LabelTest(unsigned char label); + /// Checks only the type + static std::function TypeTest(unsigned char c); - // Checks both type and the robot ID - static std::function TypeLabelTest(unsigned char c, unsigned char label); + /// Checks only the robot ID (label_) + static std::function LabelTest(unsigned char label); - // Converts to upper/lower versions of labels + /// Checks both type and the robot ID + static std::function TypeLabelTest(unsigned char c, + unsigned char label); + + /// Converts to upper/lower versions of labels LabeledSymbol upper() const { return LabeledSymbol(c_, toupper(label_), j_); } LabeledSymbol lower() const { return LabeledSymbol(c_, tolower(label_), j_); } - // Create a new symbol with a different character. - LabeledSymbol newChr(unsigned char c) const { return LabeledSymbol(c, label_, j_); } + /// Create a new symbol with a different character. + LabeledSymbol newChr(unsigned char c) const { + return LabeledSymbol(c, label_, j_); + } - // Create a new symbol with a different label. - LabeledSymbol newLabel(unsigned char label) const { return LabeledSymbol(c_, label, j_); } + /// Create a new symbol with a different label. + LabeledSymbol newLabel(unsigned char label) const { + return LabeledSymbol(c_, label, j_); + } - /// Output stream operator that can be used with key_formatter (see Key.h). 
- friend GTSAM_EXPORT std::ostream &operator<<(std::ostream &, const LabeledSymbol &); - -private: + /// @} + private: #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION - /** Serialization function */ + /// Serialization function friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & BOOST_SERIALIZATION_NVP(c_); - ar & BOOST_SERIALIZATION_NVP(label_); - ar & BOOST_SERIALIZATION_NVP(j_); + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_NVP(c_); + ar& BOOST_SERIALIZATION_NVP(label_); + ar& BOOST_SERIALIZATION_NVP(j_); } #endif -}; // \class LabeledSymbol +}; // \class LabeledSymbol /** Create a symbol key from a character, label and index, i.e. xA5. */ inline Key mrsymbol(unsigned char c, unsigned char label, size_t j) { - return (Key)LabeledSymbol(c,label,j); + return (Key)LabeledSymbol(c, label, j); } /** Return the character portion of a symbol key. */ inline unsigned char mrsymbolChr(Key key) { return LabeledSymbol(key).chr(); } /** Return the label portion of a symbol key. */ -inline unsigned char mrsymbolLabel(Key key) { return LabeledSymbol(key).label(); } +inline unsigned char mrsymbolLabel(Key key) { + return LabeledSymbol(key).label(); +} /** Return the index portion of a symbol key. */ inline size_t mrsymbolIndex(Key key) { return LabeledSymbol(key).index(); } /// traits -template<> struct traits : public Testable {}; - -} // \namespace gtsam +template <> +struct traits : public Testable {}; +} // namespace gtsam From bec4afa3f8543525b0532dbe8c7c285a158516cb Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 13:09:58 -0700 Subject: [PATCH 188/204] EdgeKey for view graphs etc... --- gtsam/inference/EdgeKey.cpp | 33 ++++++++++++ gtsam/inference/EdgeKey.h | 74 +++++++++++++++++++++++++++ gtsam/inference/tests/testEdgeKey.cpp | 57 +++++++++++++++++++++ 3 files changed, 164 insertions(+) create mode 100644 gtsam/inference/EdgeKey.cpp create mode 100644 gtsam/inference/EdgeKey.h create mode 100644 gtsam/inference/tests/testEdgeKey.cpp diff --git a/gtsam/inference/EdgeKey.cpp b/gtsam/inference/EdgeKey.cpp new file mode 100644 index 000000000..d44f7a2a2 --- /dev/null +++ b/gtsam/inference/EdgeKey.cpp @@ -0,0 +1,33 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file EdgeKey.cpp + * @date Oct 24, 2024 + * @author: Frank Dellaert + * @author: Akshay Krishnan + */ + +#include + +namespace gtsam { + +// Output stream operator implementation +GTSAM_EXPORT std::ostream& operator<<(std::ostream& os, const EdgeKey& key) { + os << "{" << key.i() << ", " << key.j() << "}"; + return os; +} + +void EdgeKey::print(const std::string& s) const { + std::cout << s << *this << std::endl; +} + +} // namespace gtsam diff --git a/gtsam/inference/EdgeKey.h b/gtsam/inference/EdgeKey.h new file mode 100644 index 000000000..13de55f9e --- /dev/null +++ b/gtsam/inference/EdgeKey.h @@ -0,0 +1,74 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file EdgeKey.h + * @date Oct 24, 2024 + * @author: Frank Dellaert + * @author: Akshay Krishnan + */ + +#include +#include + +namespace gtsam { +class GTSAM_EXPORT EdgeKey { + protected: + std::uint32_t i_; ///< Upper 32 bits + std::uint32_t j_; ///< Lower 32 bits + + public: + /// @name Constructors + /// @{ + + /// Default constructor + EdgeKey() : i_(0), j_(0) {} + + /// Constructor + EdgeKey(std::uint32_t i, std::uint32_t j) : i_(i), j_(j) {} + + /// @} + /// @name API + /// @{ + + /// Cast to Key + operator Key() const { return ((std::uint64_t)i_ << 32) | j_; } + + /// Retrieve high 32 bits + inline std::uint32_t i() const { return i_; } + + /// Retrieve low 32 bits + inline std::uint32_t j() const { return j_; } + + /// Output stream operator + friend GTSAM_EXPORT std::ostream& operator<<(std::ostream&, const EdgeKey&); + + /// @} + /// @name Testable + /// @{ + + /// Prints the EdgeKey with an optional prefix string. + void print(const std::string& s = "") const; + + /// Checks if this EdgeKey is equal to another, tolerance is ignored. + bool equals(const EdgeKey& expected, double tol = 0.0) const { + return (*this) == expected; + } + /// @} +}; + +/// traits +template <> +struct traits : public Testable {}; + +} // namespace gtsam diff --git a/gtsam/inference/tests/testEdgeKey.cpp b/gtsam/inference/tests/testEdgeKey.cpp new file mode 100644 index 000000000..88fafbdd1 --- /dev/null +++ b/gtsam/inference/tests/testEdgeKey.cpp @@ -0,0 +1,57 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/* + * @file testEdgeKey.cpp + * @date Oct 24, 2024 + * @author: Frank Dellaert + * @author: Akshay Krishnan + */ + +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +TEST(EdgeKey, Construction) { + EdgeKey edge(1, 2); + EXPECT(edge.i() == 1); + EXPECT(edge.j() == 2); +} + +/* ************************************************************************* */ +TEST(EdgeKey, Equality) { + EdgeKey edge1(1, 2); + EdgeKey edge2(1, 2); + EdgeKey edge3(2, 3); + + EXPECT(assert_equal(edge1, edge2)); + EXPECT(!edge1.equals(edge3)); +} + +/* ************************************************************************* */ +TEST(EdgeKey, StreamOutput) { + EdgeKey edge(1, 2); + std::ostringstream oss; + oss << edge; + EXPECT("{1, 2}" == oss.str()); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ From 68c9da8c4e7b3c333dfe394e5b58a38e4bd1d9b5 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 13:21:29 -0700 Subject: [PATCH 189/204] More cleanup --- gtsam/inference/LabeledSymbol.h | 26 ++++++++++++++------------ gtsam/sfm/TranslationFactor.h | 4 ++-- gtsam/sfm/TranslationRecovery.h | 4 ++-- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/gtsam/inference/LabeledSymbol.h b/gtsam/inference/LabeledSymbol.h index eb663ef9b..927fb7669 100644 --- a/gtsam/inference/LabeledSymbol.h +++ b/gtsam/inference/LabeledSymbol.h @@ -68,7 +68,7 @@ class GTSAM_EXPORT LabeledSymbol { } /// @} - /// @name API + /// @name Standard API /// @{ /// Cast to Key @@ -103,15 +103,13 @@ class GTSAM_EXPORT LabeledSymbol { bool operator!=(const LabeledSymbol& comp) const; bool operator!=(Key comp) const; - /** Return a filter function that returns true when evaluated on a Key whose - * character (when converted to a LabeledSymbol) matches \c c. Use this with - * the Values::filter() function to retrieve all key-value pairs with the - * requested character. - */ - /// @} - /// @name Advanced API + /// @name Filtering /// @{ + /// Return a filter function that returns true when evaluated on a Key whose + /// character (when converted to a LabeledSymbol) matches \c c. Use this with + /// the Values::filter() function to retrieve all key-value pairs with the + /// requested character. /// Checks only the type static std::function TypeTest(unsigned char c); @@ -123,6 +121,10 @@ class GTSAM_EXPORT LabeledSymbol { static std::function TypeLabelTest(unsigned char c, unsigned char label); + /// @} + /// @name Advanced API + /// @{ + /// Converts to upper/lower versions of labels LabeledSymbol upper() const { return LabeledSymbol(c_, toupper(label_), j_); } LabeledSymbol lower() const { return LabeledSymbol(c_, tolower(label_), j_); } @@ -152,20 +154,20 @@ class GTSAM_EXPORT LabeledSymbol { #endif }; // \class LabeledSymbol -/** Create a symbol key from a character, label and index, i.e. xA5. */ +/// Create a symbol key from a character, label and index, i.e. xA5. inline Key mrsymbol(unsigned char c, unsigned char label, size_t j) { return (Key)LabeledSymbol(c, label, j); } -/** Return the character portion of a symbol key. 
*/ +/// Return the character portion of a symbol key. inline unsigned char mrsymbolChr(Key key) { return LabeledSymbol(key).chr(); } -/** Return the label portion of a symbol key. */ +/// Return the label portion of a symbol key. inline unsigned char mrsymbolLabel(Key key) { return LabeledSymbol(key).label(); } -/** Return the index portion of a symbol key. */ +/// Return the index portion of a symbol key. inline size_t mrsymbolIndex(Key key) { return LabeledSymbol(key).index(); } /// traits diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h index 33bc82f5a..cfb9cff20 100644 --- a/gtsam/sfm/TranslationFactor.h +++ b/gtsam/sfm/TranslationFactor.h @@ -55,7 +55,7 @@ class TranslationFactor : public NoiseModelFactorN { : Base(noiseModel, a, b), measured_w_aZb_(w_aZb.point3()) {} /** - * @brief Caclulate error: (norm(Tb - Ta) - measurement) + * @brief Calculate error: (norm(Tb - Ta) - measurement) * where Tb and Ta are Point3 translations and measurement is * the Unit3 translation direction from a to b. * @@ -120,7 +120,7 @@ class BilinearAngleTranslationFactor using NoiseModelFactor2::evaluateError; /** - * @brief Caclulate error: (scale * (Tb - Ta) - measurement) + * @brief Calculate error: (scale * (Tb - Ta) - measurement) * where Tb and Ta are Point3 translations and measurement is * the Unit3 translation direction from a to b. * diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h index a91ef01f9..f821da975 100644 --- a/gtsam/sfm/TranslationRecovery.h +++ b/gtsam/sfm/TranslationRecovery.h @@ -119,7 +119,7 @@ class GTSAM_EXPORT TranslationRecovery { * @param betweenTranslations relative translations (with scale) between 2 * points in world coordinate frame known a priori. * @param rng random number generator - * @param intialValues (optional) initial values from a prior + * @param initialValues (optional) initial values from a prior * @return Values */ Values initializeRandomly( @@ -156,7 +156,7 @@ class GTSAM_EXPORT TranslationRecovery { * points in world coordinate frame known a priori. Unlike * relativeTranslations, zero-magnitude betweenTranslations are not treated as * hard constraints. - * @param initialValues intial values for optimization. Initializes randomly + * @param initialValues initial values for optimization. Initializes randomly * if not provided. 
* @return Values */ From e6bfcada40375bd8a742e111bcdfa58d994b0e3b Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 13:23:00 -0700 Subject: [PATCH 190/204] True factor --- gtsam/sfm/TransferFactor.h | 11 +++++++++-- gtsam/sfm/tests/testTransferFactor.cpp | 8 ++++---- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h index cec7a445e..fd65db8b9 100644 --- a/gtsam/sfm/TransferFactor.h +++ b/gtsam/sfm/TransferFactor.h @@ -45,12 +45,19 @@ struct TripletError { }; template -struct TransferFactor { +class TransferFactor : public NoiseModelFactorN { Point2 p0, p1, p2; + public: + // Constructor + TransferFactor(Key key1, Key key2, const Point2& p0, const Point2& p1, + const Point2& p2, const SharedNoiseModel& model = nullptr) + : NoiseModelFactorN(model, key1, key2), p0(p0), p1(p1), p2(p2) {} + /// vector of errors returns 2D vector Vector evaluateError(const F& F12, const F& F20, // - Matrix* H12, Matrix* H20) const { + OptionalMatrixType H12 = nullptr, + OptionalMatrixType H20 = nullptr) const override { std::function fn = [&](const F& F12, const F& F20) { Vector2 error; error << // diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp index 44b03ad5d..afceb8fdb 100644 --- a/gtsam/sfm/tests/testTransferFactor.cpp +++ b/gtsam/sfm/tests/testTransferFactor.cpp @@ -80,7 +80,7 @@ TEST(TransferFactor, Jacobians) { std::cout << H20 << std::endl; // Create a TransferFactor - TransferFactor factor{p[0], p[1], p[2]}; + TransferFactor factor{0, 1, p[0], p[1], p[2]}; Matrix H0, H1; Vector e2 = factor.evaluateError(triplet.F12, triplet.F20, &H0, &H1); std::cout << "Error: " << e2 << std::endl; @@ -89,9 +89,9 @@ TEST(TransferFactor, Jacobians) { // Check Jacobians Values values; - values.insert(1, triplet.F12); - values.insert(2, triplet.F20); - // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-7); + values.insert(0, triplet.F12); + values.insert(1, triplet.F20); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-7); } //************************************************************************* From ade1207334a7d2acc42f6879501b1a9e63c3d3ed Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 14:50:31 -0700 Subject: [PATCH 191/204] Use EdgeKey logic --- gtsam/sfm/TransferFactor.h | 105 ++++++++++++++++--------- gtsam/sfm/tests/testTransferFactor.cpp | 55 ++++++------- 2 files changed, 91 insertions(+), 69 deletions(-) diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h index fd65db8b9..0f9f95c13 100644 --- a/gtsam/sfm/TransferFactor.h +++ b/gtsam/sfm/TransferFactor.h @@ -17,56 +17,83 @@ #include #include +#include #include namespace gtsam { -template -struct TripletError { - Point2 p0, p1, p2; - - /// vector of errors returns 6D vector - Vector evaluateError(const F& F01, const F& F12, const F& F20, // - Matrix* H01, Matrix* H12, Matrix* H20) const { - std::function fn = [&](const F& F01, const F& F12, - const F& F20) { - Vector6 error; - error << // - F::transfer(F01.matrix(), p1, F20.matrix().transpose(), p2) - p0, - F::transfer(F01.matrix().transpose(), p0, F12.matrix(), p2) - p1, - F::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1) - p2; - return error; - }; - if (H01) *H01 = numericalDerivative31(fn, F01, F12, F20); - if (H12) *H12 = numericalDerivative32(fn, F01, F12, F20); - if (H20) *H20 = numericalDerivative33(fn, F01, F12, F20); - return fn(F01, F12, F20); - } -}; - +/** + * Binary factor in the context of Structure 
from Motion (SfM). + * It is used to transfer points between different views based on the + * fundamental matrices between these views. The factor computes the error + * between the transferred points `pi` and `pj`, and the actual point `pk` in + * the target view. Jacobians are done using numerical differentiation. + */ template class TransferFactor : public NoiseModelFactorN { - Point2 p0, p1, p2; + EdgeKey key1_, key2_; ///< the two EdgeKeys + Point2 pi, pj, pk; ///< The points in the three views public: - // Constructor - TransferFactor(Key key1, Key key2, const Point2& p0, const Point2& p1, - const Point2& p2, const SharedNoiseModel& model = nullptr) - : NoiseModelFactorN(model, key1, key2), p0(p0), p1(p1), p2(p2) {} + /** + * @brief Constructor for the TransferFactor class. + * + * Uses EdgeKeys to determine how to use the two fundamental matrix unknowns + * F1 and F2, to transfer points pi and pj to the third view, and minimize the + * difference with pk. + * + * The edge keys must represent valid edges for the transfer operation, + * specifically one of the following configurations: + * - (i, k) and (j, k) + * - (i, k) and (k, j) + * - (k, i) and (j, k) + * - (k, i) and (k, j) + * + * @param key1 First EdgeKey specifying F1: (i, k) or (k, i). + * @param key2 Second EdgeKey specifying F2: (j, k) or (k, j). + * @param pi The point in the first view (i). + * @param pj The point in the second view (j). + * @param pk The point in the third (and transfer target) view (k). + * @param model An optional SharedNoiseModel that defines the noise model + * for this factor. Defaults to nullptr. + */ + TransferFactor(EdgeKey key1, EdgeKey key2, const Point2& pi, const Point2& pj, + const Point2& pk, const SharedNoiseModel& model = nullptr) + : NoiseModelFactorN(model, key1, key2), + key1_(key1), + key2_(key2), + pi(pi), + pj(pj), + pk(pk) {} + + // Create Matrix3 objects based on EdgeKey configurations + std::pair getMatrices(const F& F1, const F& F2) const { + // Fill Fki and Fkj based on EdgeKey configurations + if (key1_.i() == key2_.i()) { + return {F1.matrix(), F2.matrix()}; + } else if (key1_.i() == key2_.j()) { + return {F1.matrix(), F2.matrix().transpose()}; + } else if (key1_.j() == key2_.i()) { + return {F1.matrix().transpose(), F2.matrix()}; + } else if (key1_.j() == key2_.j()) { + return {F1.matrix().transpose(), F2.matrix().transpose()}; + } else { + throw std::runtime_error( + "TransferFactor: invalid EdgeKey configuration."); + } + } /// vector of errors returns 2D vector - Vector evaluateError(const F& F12, const F& F20, // - OptionalMatrixType H12 = nullptr, - OptionalMatrixType H20 = nullptr) const override { - std::function fn = [&](const F& F12, const F& F20) { - Vector2 error; - error << // - F::transfer(F20.matrix(), p0, F12.matrix().transpose(), p1) - p2; - return error; + Vector evaluateError(const F& F1, const F& F2, + OptionalMatrixType H1 = nullptr, + OptionalMatrixType H2 = nullptr) const override { + std::function transfer = [&](const F& F1, const F& F2) { + auto [Fki, Fkj] = getMatrices(F1, F2); + return F::transfer(Fki, pi, Fkj, pj); }; - if (H12) *H12 = numericalDerivative21(fn, F12, F20); - if (H20) *H20 = numericalDerivative22(fn, F12, F20); - return fn(F12, F20); + if (H1) *H1 = numericalDerivative21(transfer, F1, F2); + if (H2) *H2 = numericalDerivative22(transfer, F1, F2); + return transfer(F1, F2) - pk; } }; diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp index afceb8fdb..7bcd71b1f 100644 --- 
a/gtsam/sfm/tests/testTransferFactor.cpp +++ b/gtsam/sfm/tests/testTransferFactor.cpp @@ -6,20 +6,15 @@ */ #include -#include -#include -#include -#include -#include -#include #include -#include -#include #include #include using namespace gtsam; +double focalLength = 1000; +Point2 principalPoint(640 / 2, 480 / 2); + //************************************************************************* /// Generate three cameras on a circle, looking in std::array generateCameraPoses() { @@ -34,6 +29,7 @@ std::array generateCameraPoses() { return cameraPoses; } +//************************************************************************* // Function to generate a TripleF from camera poses TripleF generateTripleF( const std::array& cameraPoses) { @@ -48,9 +44,7 @@ TripleF generateTripleF( return {F[0], F[1], F[2]}; // Return a TripleF instance } -double focalLength = 1000; -Point2 principalPoint(640 / 2, 480 / 2); - +//************************************************************************* // Test for TransferFactor TEST(TransferFactor, Jacobians) { // Generate cameras on a circle @@ -70,28 +64,29 @@ TEST(TransferFactor, Jacobians) { } // Create a TransferFactor - TripletError error{p[0], p[1], p[2]}; - Matrix H01, H12, H20; - Vector e = error.evaluateError(triplet.F01, triplet.F12, triplet.F20, &H01, - &H12, &H20); - std::cout << "Error: " << e << std::endl; - std::cout << H01 << std::endl << std::endl; - std::cout << H12 << std::endl << std::endl; - std::cout << H20 << std::endl; + EdgeKey key01(0, 1), key12(1, 2), key20(2, 0); + TransferFactor // + factor0{key01, key20, p[1], p[2], p[0]}, + factor1{key12, key01, p[2], p[0], p[1]}, + factor2{key20, key12, p[0], p[1], p[2]}; - // Create a TransferFactor - TransferFactor factor{0, 1, p[0], p[1], p[2]}; - Matrix H0, H1; - Vector e2 = factor.evaluateError(triplet.F12, triplet.F20, &H0, &H1); - std::cout << "Error: " << e2 << std::endl; - std::cout << H0 << std::endl << std::endl; - std::cout << H1 << std::endl << std::endl; + // Check that getMatrices is correct + auto [Fki, Fkj] = factor2.getMatrices(triplet.Fca, triplet.Fbc); + EXPECT(assert_equal(triplet.Fca.matrix(), Fki)); + EXPECT(assert_equal(triplet.Fbc.matrix().transpose(), Fkj)); - // Check Jacobians + // Create Values with edge keys Values values; - values.insert(0, triplet.F12); - values.insert(1, triplet.F20); - EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-7); + values.insert(key01, triplet.Fab); + values.insert(key12, triplet.Fbc); + values.insert(key20, triplet.Fca); + + // Check error and Jacobians + for (auto&& f : {factor0, factor1, factor2}) { + Vector error = f.unwhitenedError(values); + EXPECT(assert_equal(Z_2x1, error)); + EXPECT_CORRECT_FACTOR_JACOBIANS(f, values, 1e-5, 1e-7); + } } //************************************************************************* From bf47ef3432e41e46b96b17a3e2a7ca73b31247b8 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 15:08:29 -0700 Subject: [PATCH 192/204] Got rid of inheritance --- gtsam/sfm/TransferFactor.h | 17 ++++++++++------- gtsam/sfm/tests/testTransferFactor.cpp | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h index 0f9f95c13..5c46f9b9b 100644 --- a/gtsam/sfm/TransferFactor.h +++ b/gtsam/sfm/TransferFactor.h @@ -67,15 +67,18 @@ class TransferFactor : public NoiseModelFactorN { pk(pk) {} // Create Matrix3 objects based on EdgeKey configurations - std::pair getMatrices(const F& F1, const F& F2) const { + static std::pair 
getMatrices(const EdgeKey& key1, + const F& F1, + const EdgeKey& key2, + const F& F2) { // Fill Fki and Fkj based on EdgeKey configurations - if (key1_.i() == key2_.i()) { + if (key1.i() == key2.i()) { return {F1.matrix(), F2.matrix()}; - } else if (key1_.i() == key2_.j()) { + } else if (key1.i() == key2.j()) { return {F1.matrix(), F2.matrix().transpose()}; - } else if (key1_.j() == key2_.i()) { + } else if (key1.j() == key2.i()) { return {F1.matrix().transpose(), F2.matrix()}; - } else if (key1_.j() == key2_.j()) { + } else if (key1.j() == key2.j()) { return {F1.matrix().transpose(), F2.matrix().transpose()}; } else { throw std::runtime_error( @@ -88,8 +91,8 @@ class TransferFactor : public NoiseModelFactorN { OptionalMatrixType H1 = nullptr, OptionalMatrixType H2 = nullptr) const override { std::function transfer = [&](const F& F1, const F& F2) { - auto [Fki, Fkj] = getMatrices(F1, F2); - return F::transfer(Fki, pi, Fkj, pj); + auto [Fki, Fkj] = getMatrices(key1_, F1, key2_, F2); + return Transfer(Fki, pi, Fkj, pj); }; if (H1) *H1 = numericalDerivative21(transfer, F1, F2); if (H2) *H2 = numericalDerivative22(transfer, F1, F2); diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp index 7bcd71b1f..d26bad960 100644 --- a/gtsam/sfm/tests/testTransferFactor.cpp +++ b/gtsam/sfm/tests/testTransferFactor.cpp @@ -71,7 +71,7 @@ TEST(TransferFactor, Jacobians) { factor2{key20, key12, p[0], p[1], p[2]}; // Check that getMatrices is correct - auto [Fki, Fkj] = factor2.getMatrices(triplet.Fca, triplet.Fbc); + auto [Fki, Fkj] = factor2.getMatrices(key20, triplet.Fca, key12, triplet.Fbc); EXPECT(assert_equal(triplet.Fca.matrix(), Fki)); EXPECT(assert_equal(triplet.Fbc.matrix().transpose(), Fkj)); From 6f2b49d80d4713c22e86e9888c5a59b02a0a0371 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 16:40:35 -0700 Subject: [PATCH 193/204] Additional API --- gtsam/inference/EdgeKey.cpp | 7 +++++-- gtsam/inference/EdgeKey.h | 7 +++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/gtsam/inference/EdgeKey.cpp b/gtsam/inference/EdgeKey.cpp index d44f7a2a2..f8caf8ce1 100644 --- a/gtsam/inference/EdgeKey.cpp +++ b/gtsam/inference/EdgeKey.cpp @@ -20,9 +20,12 @@ namespace gtsam { -// Output stream operator implementation +EdgeKey::operator std::string() const { + return "{" + std::to_string(i_) + ", " + std::to_string(j_) + "}"; +} + GTSAM_EXPORT std::ostream& operator<<(std::ostream& os, const EdgeKey& key) { - os << "{" << key.i() << ", " << key.j() << "}"; + os << (std::string)key; return os; } diff --git a/gtsam/inference/EdgeKey.h b/gtsam/inference/EdgeKey.h index 13de55f9e..bf1bf6030 100644 --- a/gtsam/inference/EdgeKey.h +++ b/gtsam/inference/EdgeKey.h @@ -37,6 +37,10 @@ class GTSAM_EXPORT EdgeKey { /// Constructor EdgeKey(std::uint32_t i, std::uint32_t j) : i_(i), j_(j) {} + EdgeKey(Key key) + : i_(static_cast(key >> 32)), + j_(static_cast(key)) {} + /// @} /// @name API /// @{ @@ -50,6 +54,9 @@ class GTSAM_EXPORT EdgeKey { /// Retrieve low 32 bits inline std::uint32_t j() const { return j_; } + /** Create a string from the key */ + operator std::string() const; + /// Output stream operator friend GTSAM_EXPORT std::ostream& operator<<(std::ostream&, const EdgeKey&); From 77754fd69b9a984f4419089d2975f6ebe825cce0 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 16:40:53 -0700 Subject: [PATCH 194/204] Example with SimpleF --- examples/SFMdata.h | 27 ++++++-- examples/ViewGraphExample.cpp | 113 
++++++++++++++++++++++++++++++++++ 2 files changed, 134 insertions(+), 6 deletions(-) create mode 100644 examples/ViewGraphExample.cpp diff --git a/examples/SFMdata.h b/examples/SFMdata.h index 112bd927c..f2561b490 100644 --- a/examples/SFMdata.h +++ b/examples/SFMdata.h @@ -78,12 +78,27 @@ std::vector createPoses( /** * Create regularly spaced poses with specified radius and number of cameras */ -std::vector posesOnCircle(int num_cameras = 8, double radius = 30) { - const Pose3 init(Rot3::Ypr(M_PI_2, 0, -M_PI_2), {radius, 0, 0}); - const double theta = M_PI / num_cameras; - const Pose3 delta( - Rot3::Ypr(0, -2 * theta, 0), - {sin(2 * theta) * radius, 0, radius * (1 - sin(2 * theta))}); +std::vector posesOnCircle(int num_cameras = 8, double R = 30) { + const double theta = 2 * M_PI / num_cameras; + + // Initial pose at angle 0, position (R, 0, 0), facing the center with Y-axis + // pointing down + const Pose3 init(Rot3::Ypr(M_PI_2, 0, -M_PI_2), {R, 0, 0}); + + // Delta rotation: rotate by -theta around Z-axis (counterclockwise movement) + Rot3 delta_rotation = Rot3::Ypr(0, -theta, 0); + + // Delta translation in world frame + Vector3 delta_translation_world(R * (cos(theta) - 1), R * sin(theta), 0); + + // Transform delta translation to local frame of the camera + Vector3 delta_translation_local = + init.rotation().inverse() * delta_translation_world; + + // Define delta pose + const Pose3 delta(delta_rotation, delta_translation_local); + + // Generate poses using createPoses return createPoses(init, delta, num_cameras); } } // namespace gtsam \ No newline at end of file diff --git a/examples/ViewGraphExample.cpp b/examples/ViewGraphExample.cpp new file mode 100644 index 000000000..6d5131942 --- /dev/null +++ b/examples/ViewGraphExample.cpp @@ -0,0 +1,113 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ViewGraphExample.cpp + * @brief View-graph calibration on a simulated dataset, a la Sweeney 2015 + * @author Frank Dellaert + * @author October 2024 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "SFMdata.h" +#include "gtsam/geometry/EssentialMatrix.h" +#include "gtsam/inference/Key.h" + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +int main(int argc, char* argv[]) { + // Define the camera calibration parameters + Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); + + // Create the set of 8 ground-truth landmarks + vector points = createPoints(); + + // Create the set of 4 ground-truth poses + vector poses = posesOnCircle(4, 30); + + // Simulate measurements from each camera pose + std::array, 4> p; + for (size_t i = 0; i < 4; ++i) { + GTSAM_PRINT(poses[i]); + PinholeCamera camera(poses[i], *K); + for (size_t j = 0; j < 8; ++j) { + cout << "Camera index: " << i << ", Landmark index: " << j << endl; + p[i][j] = camera.project(points[j]); + } + } + + // Create a factor graph + NonlinearFactorGraph graph; + + using Factor = TransferFactor; + for (size_t a = 0; a < 4; ++a) { + size_t b = (a + 1) % 4; // Next camera + size_t c = (a + 2) % 4; // Camera after next + for (size_t j = 0; j < 4; ++j) { + graph.emplace_shared(EdgeKey(a, c), EdgeKey(b, c), p[a][j], + p[b][j], p[c][j]); + graph.emplace_shared(EdgeKey(a, b), EdgeKey(b, c), p[a][j], + p[c][j], p[b][j]); + graph.emplace_shared(EdgeKey(a, c), EdgeKey(a, b), p[c][j], + p[b][j], p[a][j]); + } + } + + auto formatter = [](Key key) { + EdgeKey edge(key); + return (std::string)edge; + }; + + graph.print("Factor Graph:\n", formatter); + + // Create the data structure to hold the initial estimate to the solution + Values initialEstimate; + const Point2 center(50, 50); + auto E1 = EssentialMatrix::FromPose3(poses[0].between(poses[1])); + auto E2 = EssentialMatrix::FromPose3(poses[0].between(poses[2])); + for (size_t a = 0; a < 4; ++a) { + size_t b = (a + 1) % 4; // Next camera + size_t c = (a + 2) % 4; // Camera after next + initialEstimate.insert(EdgeKey(a, b), + SimpleFundamentalMatrix(E1, 50, 50, center, center)); + initialEstimate.insert(EdgeKey(a, c), + SimpleFundamentalMatrix(E2, 50, 50, center, center)); + } + initialEstimate.print("Initial Estimates:\n", formatter); + // graph.printErrors(initialEstimate, "errors: ", formatter); + + /* Optimize the graph and print results */ + LevenbergMarquardtParams params; + params.setlambdaInitial(1000.0); // Initialize lambda to a high value + params.setVerbosityLM("SUMMARY"); + Values result = + LevenbergMarquardtOptimizer(graph, initialEstimate, params).optimize(); + result.print("Final results:\n", formatter); + cout << "initial error = " << graph.error(initialEstimate) << endl; + cout << "final error = " << graph.error(result) << endl; + + return 0; +} +/* ************************************************************************* */ From 7d95505d11fd03b2d4ff06da11f8ac9720b2621b Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 17:04:11 -0700 Subject: [PATCH 195/204] Init from pose and Ks --- gtsam/geometry/FundamentalMatrix.cpp | 4 +--- gtsam/geometry/FundamentalMatrix.h | 17 +++++++++++++++++ 2 files changed, 18 
insertions(+), 3 deletions(-) diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp index 57d59d0a0..af322a3c0 100644 --- a/gtsam/geometry/FundamentalMatrix.cpp +++ b/gtsam/geometry/FundamentalMatrix.cpp @@ -72,9 +72,7 @@ Matrix3 FundamentalMatrix::matrix() const { } void FundamentalMatrix::print(const std::string& s) const { - std::cout << s << "U:\n" - << U_.matrix() << "\ns: " << s_ << "\nV:\n" - << V_.matrix() << std::endl; + std::cout << s << matrix() << std::endl; } bool FundamentalMatrix::equals(const FundamentalMatrix& other, diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h index 497f95cf7..f8aa37bf8 100644 --- a/gtsam/geometry/FundamentalMatrix.h +++ b/gtsam/geometry/FundamentalMatrix.h @@ -54,6 +54,23 @@ class GTSAM_EXPORT FundamentalMatrix { */ FundamentalMatrix(const Matrix3& F); + /** + * @brief Construct from calibration matrices Ka, Kb, and pose aPb + * + * Initializes the GeneralFundamentalMatrix from the given calibration + * matrices Ka and Kb, and the pose aPb. + * + * @tparam CAL Calibration type, expected to have a matrix() method + * @param Ka Calibration matrix for the left camera + * @param aPb Pose from the left to the right camera + * @param Kb Calibration matrix for the right camera + */ + template + GeneralFundamentalMatrix(const CAL& Ka, const Pose3& aPb, const CAL& Kb) + : GeneralFundamentalMatrix(Ka.K().transpose().inverse() * + EssentialMatrix::FromPose3(aPb).matrix() * + Kb.K().inverse()) {} + /// Return the fundamental matrix representation Matrix3 matrix() const; From ca199f9c08406f5d363579e667a70d083710aaf4 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 17:04:24 -0700 Subject: [PATCH 196/204] Switch to general F --- examples/SFMExample.cpp | 4 +-- examples/ViewGraphExample.cpp | 48 +++++++++++++++++++++++------------ 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/examples/SFMExample.cpp b/examples/SFMExample.cpp index fddca8169..0b4dc4036 100644 --- a/examples/SFMExample.cpp +++ b/examples/SFMExample.cpp @@ -39,7 +39,7 @@ // Finally, once all of the factors have been added to our factor graph, we will want to // solve/optimize to graph to find the best (Maximum A Posteriori) set of variable values. // GTSAM includes several nonlinear optimizers to perform this step. 
Here we will use a -// trust-region method known as Powell's Degleg +// trust-region method known as Powell's Dogleg #include // The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the @@ -57,7 +57,7 @@ using namespace gtsam; /* ************************************************************************* */ int main(int argc, char* argv[]) { // Define the camera calibration parameters - Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); + auto K = std::make_shared(50.0, 50.0, 0.0, 50.0, 50.0); // Define the camera observation noise model auto measurementNoise = diff --git a/examples/ViewGraphExample.cpp b/examples/ViewGraphExample.cpp index 6d5131942..7b5ed0fc6 100644 --- a/examples/ViewGraphExample.cpp +++ b/examples/ViewGraphExample.cpp @@ -30,7 +30,6 @@ #include #include "SFMdata.h" -#include "gtsam/geometry/EssentialMatrix.h" #include "gtsam/inference/Key.h" using namespace std; @@ -39,7 +38,7 @@ using namespace gtsam; /* ************************************************************************* */ int main(int argc, char* argv[]) { // Define the camera calibration parameters - Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)); + Cal3_S2 K(50.0, 50.0, 0.0, 50.0, 50.0); // Create the set of 8 ground-truth landmarks vector points = createPoints(); @@ -47,25 +46,36 @@ int main(int argc, char* argv[]) { // Create the set of 4 ground-truth poses vector poses = posesOnCircle(4, 30); + // Calculate ground truth fundamental matrices, 1 and 2 poses apart + auto F1 = GeneralFundamentalMatrix(K, poses[0].between(poses[1]), K); + auto F2 = GeneralFundamentalMatrix(K, poses[0].between(poses[2]), K); + // Simulate measurements from each camera pose std::array, 4> p; for (size_t i = 0; i < 4; ++i) { - GTSAM_PRINT(poses[i]); - PinholeCamera camera(poses[i], *K); + PinholeCamera camera(poses[i], K); for (size_t j = 0; j < 8; ++j) { - cout << "Camera index: " << i << ", Landmark index: " << j << endl; p[i][j] = camera.project(points[j]); } } - // Create a factor graph + // This section of the code is inspired by the work of Sweeney et al. + // [link](sites.cs.ucsb.edu/~holl/pubs/Sweeney-2015-ICCV.pdf) on view-graph + // calibration. The graph is made up of transfer factors that enforce the + // epipolar constraint between corresponding points across three views, as + // described in the paper. Rather than adding one ternary error term per point + // in a triplet, we add three binary factors for sparsity during optimization. + // In this version, we only include triplets between 3 successive cameras. NonlinearFactorGraph graph; - - using Factor = TransferFactor; + using Factor = TransferFactor; for (size_t a = 0; a < 4; ++a) { size_t b = (a + 1) % 4; // Next camera size_t c = (a + 2) % 4; // Camera after next for (size_t j = 0; j < 4; ++j) { + // Add transfer factors between views a, b, and c. Note that the EdgeKeys + // are crucial in performing the transfer in the right direction. We use + // exactly 8 unique EdgeKeys, corresponding to 8 unknown fundamental + // matrices we will optimize for. 
graph.emplace_shared(EdgeKey(a, c), EdgeKey(b, c), p[a][j], p[b][j], p[c][j]); graph.emplace_shared(EdgeKey(a, b), EdgeKey(b, c), p[a][j], @@ -82,18 +92,19 @@ int main(int argc, char* argv[]) { graph.print("Factor Graph:\n", formatter); + // Create a delta vector to perturb the ground truth + // We can't really go far before convergence becomes problematic :-( + Vector7 delta; + delta << 1, 2, 3, 4, 5, 6, 7; + delta *= 5e-5; + // Create the data structure to hold the initial estimate to the solution Values initialEstimate; - const Point2 center(50, 50); - auto E1 = EssentialMatrix::FromPose3(poses[0].between(poses[1])); - auto E2 = EssentialMatrix::FromPose3(poses[0].between(poses[2])); for (size_t a = 0; a < 4; ++a) { size_t b = (a + 1) % 4; // Next camera size_t c = (a + 2) % 4; // Camera after next - initialEstimate.insert(EdgeKey(a, b), - SimpleFundamentalMatrix(E1, 50, 50, center, center)); - initialEstimate.insert(EdgeKey(a, c), - SimpleFundamentalMatrix(E2, 50, 50, center, center)); + initialEstimate.insert(EdgeKey(a, b), F1.retract(delta)); + initialEstimate.insert(EdgeKey(a, c), F2.retract(delta)); } initialEstimate.print("Initial Estimates:\n", formatter); // graph.printErrors(initialEstimate, "errors: ", formatter); @@ -104,10 +115,15 @@ int main(int argc, char* argv[]) { params.setVerbosityLM("SUMMARY"); Values result = LevenbergMarquardtOptimizer(graph, initialEstimate, params).optimize(); - result.print("Final results:\n", formatter); + cout << "initial error = " << graph.error(initialEstimate) << endl; cout << "final error = " << graph.error(result) << endl; + result.print("Final results:\n", formatter); + + cout << "Ground Truth F1:\n" << F1.matrix() << endl; + cout << "Ground Truth F2:\n" << F2.matrix() << endl; + return 0; } /* ************************************************************************* */ From 19c75cb95cdeadafdb154c01894d0702d9ed6c99 Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Thu, 24 Oct 2024 17:36:53 -0700 Subject: [PATCH 197/204] Use FundamentalMatrix now --- examples/ViewGraphExample.cpp | 6 +++--- gtsam/geometry/FundamentalMatrix.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/ViewGraphExample.cpp b/examples/ViewGraphExample.cpp index 7b5ed0fc6..d1c7113bc 100644 --- a/examples/ViewGraphExample.cpp +++ b/examples/ViewGraphExample.cpp @@ -47,8 +47,8 @@ int main(int argc, char* argv[]) { vector poses = posesOnCircle(4, 30); // Calculate ground truth fundamental matrices, 1 and 2 poses apart - auto F1 = GeneralFundamentalMatrix(K, poses[0].between(poses[1]), K); - auto F2 = GeneralFundamentalMatrix(K, poses[0].between(poses[2]), K); + auto F1 = FundamentalMatrix(K, poses[0].between(poses[1]), K); + auto F2 = FundamentalMatrix(K, poses[0].between(poses[2]), K); // Simulate measurements from each camera pose std::array, 4> p; @@ -67,7 +67,7 @@ int main(int argc, char* argv[]) { // in a triplet, we add three binary factors for sparsity during optimization. // In this version, we only include triplets between 3 successive cameras. 
From be68e07ddb22a7ac82e350e33d27aa23fb66b6a5 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 17:54:04 -0700
Subject: [PATCH 198/204] Use abc terminology

---
 gtsam/sfm/TransferFactor.h             | 59 ++++++++++++--------------
 gtsam/sfm/tests/testTransferFactor.cpp |  2 +-
 2 files changed, 29 insertions(+), 32 deletions(-)

diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h
index 5c46f9b9b..8acde7b88 100644
--- a/gtsam/sfm/TransferFactor.h
+++ b/gtsam/sfm/TransferFactor.h
@@ -26,59 +26,56 @@ namespace gtsam {
 * Binary factor in the context of Structure from Motion (SfM).
 * It is used to transfer points between different views based on the
 * fundamental matrices between these views. The factor computes the error
- * between the transferred points `pi` and `pj`, and the actual point `pk` in
+ * between the transferred points `pa` and `pb`, and the actual point `pc` in
 * the target view. Jacobians are done using numerical differentiation.
 */
template <typename F>
class TransferFactor : public NoiseModelFactorN<F, F> {
  EdgeKey key1_, key2_;  ///< the two EdgeKeys
-  Point2 pi, pj, pk;  ///< The points in the three views
+  Point2 pa, pb, pc;  ///< The points in the three views

 public:
  /**
   * @brief Constructor for the TransferFactor class.
   *
   * Uses EdgeKeys to determine how to use the two fundamental matrix unknowns
-   * F1 and F2, to transfer points pi and pj to the third view, and minimize the
-   * difference with pk.
+   * F1 and F2, to transfer points pa and pb to the third view, and minimize the
+   * difference with pc.
   *
   * The edge keys must represent valid edges for the transfer operation,
   * specifically one of the following configurations:
-   * - (i, k) and (j, k)
-   * - (i, k) and (k, j)
-   * - (k, i) and (j, k)
-   * - (k, i) and (k, j)
+   * - (a, c) and (b, c)
+   * - (a, c) and (c, b)
+   * - (c, a) and (b, c)
+   * - (c, a) and (c, b)
   *
-   * @param key1 First EdgeKey specifying F1: (i, k) or (k, i).
-   * @param key2 Second EdgeKey specifying F2: (j, k) or (k, j).
-   * @param pi The point in the first view (i).
-   * @param pj The point in the second view (j).
-   * @param pk The point in the third (and transfer target) view (k).
+   * @param key1 First EdgeKey specifying F1: (a, c) or (c, a).
+   * @param key2 Second EdgeKey specifying F2: (b, c) or (c, b).
+   * @param pa The point in the first view (a).
+   * @param pb The point in the second view (b).
+   * @param pc The point in the third (and transfer target) view (c).
   * @param model An optional SharedNoiseModel that defines the noise model
   * for this factor. Defaults to nullptr.
   */
-  TransferFactor(EdgeKey key1, EdgeKey key2, const Point2& pi, const Point2& pj,
-                 const Point2& pk, const SharedNoiseModel& model = nullptr)
+  TransferFactor(EdgeKey key1, EdgeKey key2, const Point2& pa, const Point2& pb,
+                 const Point2& pc, const SharedNoiseModel& model = nullptr)
      : NoiseModelFactorN<F, F>(model, key1, key2),
        key1_(key1),
        key2_(key2),
-        pi(pi),
-        pj(pj),
-        pk(pk) {}
+        pa(pa),
+        pb(pb),
+        pc(pc) {}

  // Create Matrix3 objects based on EdgeKey configurations
-  static std::pair<Matrix3, Matrix3> getMatrices(const EdgeKey& key1,
-                                                 const F& F1,
-                                                 const EdgeKey& key2,
-                                                 const F& F2) {
-    // Fill Fki and Fkj based on EdgeKey configurations
-    if (key1.i() == key2.i()) {
+  std::pair<Matrix3, Matrix3> getMatrices(const F& F1, const F& F2) const {
+    // Fill Fca and Fcb based on EdgeKey configurations
+    if (key1_.i() == key2_.i()) {
      return {F1.matrix(), F2.matrix()};
-    } else if (key1.i() == key2.j()) {
+    } else if (key1_.i() == key2_.j()) {
      return {F1.matrix(), F2.matrix().transpose()};
-    } else if (key1.j() == key2.i()) {
+    } else if (key1_.j() == key2_.i()) {
      return {F1.matrix().transpose(), F2.matrix()};
-    } else if (key1.j() == key2.j()) {
+    } else if (key1_.j() == key2_.j()) {
      return {F1.matrix().transpose(), F2.matrix().transpose()};
    } else {
      throw std::runtime_error(
@@ -91,13 +88,13 @@ class TransferFactor : public NoiseModelFactorN<F, F> {
                       OptionalMatrixType H1 = nullptr,
                       OptionalMatrixType H2 = nullptr) const override {
    std::function<Point2(F, F)> transfer = [&](const F& F1, const F& F2) {
-      auto [Fki, Fkj] = getMatrices(key1_, F1, key2_, F2);
-      return Transfer(Fki, pi, Fkj, pj);
+      auto [Fca, Fcb] = getMatrices(F1, F2);
+      return Transfer(Fca, pa, Fcb, pb);
    };
    if (H1) *H1 = numericalDerivative21<Point2, F, F>(transfer, F1, F2);
    if (H2) *H2 = numericalDerivative22<Point2, F, F>(transfer, F1, F2);
-    return transfer(F1, F2) - pk;
+    return transfer(F1, F2) - pc;
  }
};
-}  // namespace gtsam
+}  // namespace gtsam
\ No newline at end of file
diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp
index d26bad960..7bcd71b1f 100644
--- a/gtsam/sfm/tests/testTransferFactor.cpp
+++ b/gtsam/sfm/tests/testTransferFactor.cpp
@@ -71,7 +71,7 @@ TEST(TransferFactor, Jacobians) {
      factor2{key20, key12, p[0], p[1], p[2]};

  // Check that getMatrices is correct
-  auto [Fki, Fkj] = factor2.getMatrices(key20, triplet.Fca, key12, triplet.Fbc);
+  auto [Fki, Fkj] = factor2.getMatrices(triplet.Fca, triplet.Fbc);
  EXPECT(assert_equal(triplet.Fca.matrix(), Fki));
  EXPECT(assert_equal(triplet.Fbc.matrix().transpose(), Fkj));
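The four admissible EdgeKey configurations are easiest to see with concrete indices. A small illustration (keys and points are made up; any type F with a matrix() method works):

    // Views a=0, b=1, c=2, with both stored edges pointing towards c.
    EdgeKey key1(0, 2);  // (a, c)
    EdgeKey key2(1, 2);  // (b, c)
    Point2 pa(10, 20), pb(30, 40), pc(25, 35);  // illustrative correspondences
    TransferFactor<SimpleFundamentalMatrix> factor(key1, key2, pa, pb, pc);
    // Inside evaluateError: key1.j() == key2.j() == 2, the last branch of
    // getMatrices, so both matrices are transposed. This re-orients the
    // edges (a, c) and (b, c) as (c, a) and (c, b), making both fundamental
    // matrices map *into* the common target view c before the transfer.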
From 38ac1b48374009a0dfe753e5024e7dc3adc311ac Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Thu, 24 Oct 2024 19:17:23 -0700
Subject: [PATCH 199/204] Batched version

---
 examples/ViewGraphExample.cpp          | 35 +++++++++------
 gtsam/sfm/TransferFactor.h             | 60 +++++++++++++++++---------
 gtsam/sfm/tests/testTransferFactor.cpp | 58 +++++++++++++++++++++++++
 3 files changed, 118 insertions(+), 35 deletions(-)

diff --git a/examples/ViewGraphExample.cpp b/examples/ViewGraphExample.cpp
index d1c7113bc..c23ac084c 100644
--- a/examples/ViewGraphExample.cpp
+++ b/examples/ViewGraphExample.cpp
@@ -13,7 +13,7 @@
 * @file ViewGraphExample.cpp
 * @brief View-graph calibration on a simulated dataset, a la Sweeney 2015
 * @author Frank Dellaert
- * @author October 2024
+ * @date October 2024
 */

 #include

@@ -68,21 +68,28 @@ int main(int argc, char* argv[]) {
  // In this version, we only include triplets between 3 successive cameras.
  NonlinearFactorGraph graph;
  using Factor = TransferFactor<FundamentalMatrix>;
+
  for (size_t a = 0; a < 4; ++a) {
    size_t b = (a + 1) % 4;  // Next camera
    size_t c = (a + 2) % 4;  // Camera after next
+
+    // Vectors to collect tuples for each factor
+    std::vector<std::tuple<Point2, Point2, Point2>> tuples1, tuples2, tuples3;
+
+    // Collect data for the three factors
+    for (size_t j = 0; j < 8; ++j) {
+      tuples1.emplace_back(p[a][j], p[b][j], p[c][j]);
+      tuples2.emplace_back(p[a][j], p[c][j], p[b][j]);
+      tuples3.emplace_back(p[c][j], p[b][j], p[a][j]);
    }
+
+    // Add transfer factors between views a, b, and c. Note that the EdgeKeys
+    // are crucial in performing the transfer in the right direction. We use
+    // exactly 8 unique EdgeKeys, corresponding to 8 unknown fundamental
+    // matrices we will optimize for.
+    graph.emplace_shared<Factor>(EdgeKey(a, c), EdgeKey(b, c), tuples1);
+    graph.emplace_shared<Factor>(EdgeKey(a, b), EdgeKey(b, c), tuples2);
+    graph.emplace_shared<Factor>(EdgeKey(a, c), EdgeKey(a, b), tuples3);
  }

  auto formatter = [](Key key) {
@@ -96,7 +103,7 @@ int main(int argc, char* argv[]) {
  // We can't really go far before convergence becomes problematic :-(
  Vector7 delta;
  delta << 1, 2, 3, 4, 5, 6, 7;
-  delta *= 5e-5;
+  delta *= 1e-5;

  // Create the data structure to hold the initial estimate to the solution
  Values initialEstimate;
@@ -107,7 +114,7 @@ int main(int argc, char* argv[]) {
    initialEstimate.insert(EdgeKey(a, c), F2.retract(delta));
  }
  initialEstimate.print("Initial Estimates:\n", formatter);
-  // graph.printErrors(initialEstimate, "errors: ", formatter);
+  graph.printErrors(initialEstimate, "errors: ", formatter);
diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h
index 8acde7b88..35ef8c1ae 100644
--- a/gtsam/sfm/TransferFactor.h
+++ b/gtsam/sfm/TransferFactor.h
@@ -32,22 +32,14 @@ namespace gtsam {
template <typename F>
class TransferFactor : public NoiseModelFactorN<F, F> {
  EdgeKey key1_, key2_;  ///< the two EdgeKeys
-  Point2 pa, pb, pc;  ///< The points in the three views
+  std::vector<std::tuple<Point2, Point2, Point2>>
+      triplets_;  ///< Point triplets

 public:
  /**
-   * @brief Constructor for the TransferFactor class.
+   * @brief Constructor for a single triplet of points
   *
-   * Uses EdgeKeys to determine how to use the two fundamental matrix unknowns
-   * F1 and F2, to transfer points pa and pb to the third view, and minimize the
-   * difference with pc.
+   * @note: batching all points for the same transfer will be much faster.
   *
   * @param key1 First EdgeKey specifying F1: (a, c) or (c, a).
   * @param key2 Second EdgeKey specifying F2: (b, c) or (c, b).
@@ -62,9 +54,25 @@ class TransferFactor : public NoiseModelFactorN<F, F> {
      : NoiseModelFactorN<F, F>(model, key1, key2),
        key1_(key1),
        key2_(key2),
-        pa(pa),
-        pb(pb),
-        pc(pc) {}
+        triplets_({std::make_tuple(pa, pb, pc)}) {}
+
+  /**
+   * @brief Constructor that accepts a vector of point triplets.
+   *
+   * @param key1 First EdgeKey specifying F1: (a, c) or (c, a).
+   * @param key2 Second EdgeKey specifying F2: (b, c) or (c, b).
+   * @param triplets A vector of triplets containing (pa, pb, pc).
+   * @param model An optional SharedNoiseModel that defines the noise model
+   * for this factor. Defaults to nullptr.
+   */
+  TransferFactor(
+      EdgeKey key1, EdgeKey key2,
+      const std::vector<std::tuple<Point2, Point2, Point2>>& triplets,
+      const SharedNoiseModel& model = nullptr)
+      : NoiseModelFactorN<F, F>(model, key1, key2),
+        key1_(key1),
+        key2_(key2),
+        triplets_(triplets) {}

  // Create Matrix3 objects based on EdgeKey configurations
  std::pair<Matrix3, Matrix3> getMatrices(const F& F1, const F& F2) const {
@@ -83,17 +91,27 @@ class TransferFactor : public NoiseModelFactorN<F, F> {
    }
  }

-  /// vector of errors returns 2D vector
+  /// vector of errors returns 2*N vector
  Vector evaluateError(const F& F1, const F& F2,
                       OptionalMatrixType H1 = nullptr,
                       OptionalMatrixType H2 = nullptr) const override {
-    std::function<Point2(F, F)> transfer = [&](const F& F1, const F& F2) {
+    std::function<Vector(F, F)> transfer = [&](const F& F1,
+                                               const F& F2) {
+      Vector errors(2 * triplets_.size());
+      size_t idx = 0;
      auto [Fca, Fcb] = getMatrices(F1, F2);
-      return Transfer(Fca, pa, Fcb, pb);
+      for (const auto& tuple : triplets_) {
+        const auto& [pa, pb, pc] = tuple;
+        Point2 transferredPoint = Transfer(Fca, pa, Fcb, pb);
+        Vector2 error = transferredPoint - pc;
+        errors.segment<2>(idx) = error;
+        idx += 2;
+      }
+      return errors;
    };
-    if (H1) *H1 = numericalDerivative21<Point2, F, F>(transfer, F1, F2);
-    if (H2) *H2 = numericalDerivative22<Point2, F, F>(transfer, F1, F2);
-    return transfer(F1, F2) - pc;
+    if (H1) *H1 = numericalDerivative21<Vector, F, F>(transfer, F1, F2);
+    if (H2) *H2 = numericalDerivative22<Vector, F, F>(transfer, F1, F2);
+    return transfer(F1, F2);
  }
};
diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp
index 7bcd71b1f..6092ec456 100644
--- a/gtsam/sfm/tests/testTransferFactor.cpp
+++ b/gtsam/sfm/tests/testTransferFactor.cpp
@@ -89,6 +89,64 @@ TEST(TransferFactor, Jacobians) {
  }
}

+//*************************************************************************
+// Test for TransferFactor with multiple tuples
+TEST(TransferFactor, MultipleTuples) {
+  // Generate cameras on a circle
+  std::array<Pose3, 3> cameraPoses = generateCameraPoses();
+  auto triplet = generateTripleF(cameraPoses);
+
+  // Now project multiple points into the three cameras
+  const size_t numPoints = 5;  // Number of points to test
+  std::vector<Point3> points3D;
+  std::vector<std::array<Point2, 3>> projectedPoints;
+
+  const Cal3_S2 K(focalLength, focalLength, 0.0,  //
+                  principalPoint.x(), principalPoint.y());
+
+  // Generate random 3D points and project them
+  for (size_t n = 0; n < numPoints; ++n) {
+    Point3 P(0.1 * n, 0.2 * n, 0.3 + 0.1 * n);
+    points3D.push_back(P);
+
+    std::array<Point2, 3> p;
+    for (size_t i = 0; i < 3; ++i) {
+      // Project the point into each camera
+      PinholeCameraCal3_S2 camera(cameraPoses[i], K);
+      p[i] = camera.project(P);
+    }
+    projectedPoints.push_back(p);
+  }
+
+  // Create a vector of tuples for the TransferFactor
+  EdgeKey key01(0, 1), key12(1, 2), key20(2, 0);
+  std::vector<std::tuple<Point2, Point2, Point2>> tuples0, tuples1, tuples2;
+
+  for (size_t n = 0; n < numPoints; ++n) {
+    const auto& p = projectedPoints[n];
+    tuples0.emplace_back(p[1], p[2], p[0]);
+  }
+
+  // Create TransferFactors using the new constructor
+  TransferFactor<SimpleFundamentalMatrix> factor{key01, key20, tuples0};
+
+  // Create Values with edge keys
+  Values values;
+  values.insert(key01, triplet.Fab);
+  values.insert(key12, triplet.Fbc);
+  values.insert(key20, triplet.Fca);
+
+  // Check error and Jacobians for multiple tuples
+  Vector error = factor.unwhitenedError(values);
+  // The error vector should be of size 2 * numPoints
+  EXPECT_LONGS_EQUAL(2 * numPoints, error.size());
+  // Since the points are perfectly matched, the error should be zero
+  EXPECT(assert_equal(Vector::Zero(2 * numPoints), error, 1e-9));
+
+  // Check the Jacobians
+  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-7);
+}
+
 //*************************************************************************
 int main() {
   TestResult tr;
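The payoff of the batched constructor: the factor now returns one stacked 2N error vector, so each numericalDerivative call produces a full 2N x 7 Jacobian (FundamentalMatrix has 7 degrees of freedom) instead of N separate factors each re-running the transfer for a 2 x 7 block. A hedged usage sketch, where `matches` is a hypothetical container of corresponding points in views 0, 1, and 2:

    // Collect all correspondences that share the same two unknown matrices,
    // then add a single batched factor for them.
    std::vector<std::tuple<Point2, Point2, Point2>> triplets;
    for (const auto& [pa, pb, pc] : matches)  // 'matches' is assumed input
      triplets.emplace_back(pa, pb, pc);
    graph.emplace_shared<TransferFactor<FundamentalMatrix>>(
        EdgeKey(0, 2), EdgeKey(1, 2), triplets);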
From 00cb637db34e80e3cab6bd316c5602b9e342a51a Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Fri, 25 Oct 2024 07:36:16 -0700
Subject: [PATCH 200/204] Renamed method

---
 gtsam/sfm/TransferFactor.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h
index 35ef8c1ae..fb2dd63b0 100644
--- a/gtsam/sfm/TransferFactor.h
+++ b/gtsam/sfm/TransferFactor.h
@@ -102,7 +102,7 @@ class TransferFactor : public NoiseModelFactorN<F, F> {
      auto [Fca, Fcb] = getMatrices(F1, F2);
      for (const auto& tuple : triplets_) {
        const auto& [pa, pb, pc] = tuple;
-        Point2 transferredPoint = Transfer(Fca, pa, Fcb, pb);
+        Point2 transferredPoint = EpipolarTransfer(Fca, pa, Fcb, pb);
        Vector2 error = transferredPoint - pc;
        errors.segment<2>(idx) = error;
        idx += 2;
From cbbe2d2e887659ac1c7ad1d96c45264272435777 Mon Sep 17 00:00:00 2001
From: Frank Dellaert
Date: Fri, 25 Oct 2024 20:48:02 -0700
Subject: [PATCH 201/204] Address review comments

---
 gtsam/geometry/FundamentalMatrix.cpp   |  6 ++--
 gtsam/geometry/FundamentalMatrix.h     | 31 +++++++++-------
 gtsam/sfm/TransferFactor.h             |  4 +--
 gtsam/sfm/tests/testTransferFactor.cpp | 50 ++++++++++++++------------
 4 files changed, 52 insertions(+), 39 deletions(-)

diff --git a/gtsam/geometry/FundamentalMatrix.cpp b/gtsam/geometry/FundamentalMatrix.cpp
index af322a3c0..d65053d19 100644
--- a/gtsam/geometry/FundamentalMatrix.cpp
+++ b/gtsam/geometry/FundamentalMatrix.cpp
@@ -97,20 +97,20 @@ FundamentalMatrix FundamentalMatrix::retract(const Vector& delta) const {
}

//*************************************************************************
-Matrix3 SimpleFundamentalMatrix::leftK() const {
+Matrix3 SimpleFundamentalMatrix::Ka() const {
  Matrix3 K;
  K << fa_, 0, ca_.x(), 0, fa_, ca_.y(), 0, 0, 1;
  return K;
}

-Matrix3 SimpleFundamentalMatrix::rightK() const {
+Matrix3 SimpleFundamentalMatrix::Kb() const {
  Matrix3 K;
  K << fb_, 0, cb_.x(), 0, fb_, cb_.y(), 0, 0, 1;
  return K;
}

Matrix3 SimpleFundamentalMatrix::matrix() const {
-  return leftK().transpose().inverse() * E_.matrix() * rightK().inverse();
+  return Ka().transpose().inverse() * E_.matrix() * Kb().inverse();
}

void SimpleFundamentalMatrix::print(const std::string& s) const {
diff --git a/gtsam/geometry/FundamentalMatrix.h b/gtsam/geometry/FundamentalMatrix.h
index 8a3ba86b8..6f04f45e8 100644
--- a/gtsam/geometry/FundamentalMatrix.h
+++ b/gtsam/geometry/FundamentalMatrix.h
@@ -68,8 +68,8 @@ class GTSAM_EXPORT FundamentalMatrix {
  template <typename CAL>
  FundamentalMatrix(const CAL& Ka, const Pose3& aPb, const CAL& Kb)
      : FundamentalMatrix(Ka.K().transpose().inverse() *
-                              EssentialMatrix::FromPose3(aPb).matrix() *
-                              Kb.K().inverse()) {}
+                          EssentialMatrix::FromPose3(aPb).matrix() *
+                          Kb.K().inverse()) {}

  /// Return the fundamental matrix representation
  Matrix3 matrix() const;
@@ -114,25 +114,32 @@ class GTSAM_EXPORT SimpleFundamentalMatrix {
  Point2 ca_;  ///< Principal point for left camera
  Point2 cb_;  ///< Principal point for right camera

+  /// Return the left calibration matrix
+  Matrix3 Ka() const;
+
+  /// Return the right calibration matrix
+  Matrix3 Kb() const;
+
 public:
  /// Default constructor
  SimpleFundamentalMatrix()
      : E_(), fa_(1.0), fb_(1.0), ca_(0.0, 0.0), cb_(0.0, 0.0) {}

-  /// Construct from essential matrix and focal lengths
+  /**
+   * @brief Construct from essential matrix and focal lengths
+   * @param E Essential matrix
+   * @param fa Focal length for left camera
+   * @param fb Focal length for right camera
+   * @param ca Principal point for left camera
+   * @param cb Principal point for right camera
+   */
  SimpleFundamentalMatrix(const EssentialMatrix& E,  //
-                          double fa, double fb,
-                          const Point2& ca = Point2(0.0, 0.0),
-                          const Point2& cb = Point2(0.0, 0.0))
+                          double fa, double fb, const Point2& ca,
+                          const Point2& cb)
      : E_(E), fa_(fa), fb_(fb), ca_(ca), cb_(cb) {}

-  /// Return the left calibration matrix
-  Matrix3 leftK() const;
-
-  /// Return the right calibration matrix
-  Matrix3 rightK() const;
-
  /// Return the fundamental matrix representation
+  /// F = Ka^(-T) * E * Kb^(-1)
  Matrix3 matrix() const;

  /// @name Testable
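With the default arguments removed, callers of SimpleFundamentalMatrix must now pass both principal points explicitly; the formula in the new comment can also be exercised directly. A short sketch (values are illustrative only):

    EssentialMatrix E;  // default-constructed essential matrix
    SimpleFundamentalMatrix sF(E, 50.0, 50.0, Point2(50, 50), Point2(50, 50));
    Matrix3 F = sF.matrix();  // computes Ka^(-T) * E * Kb^(-1), where Ka and
                              // Kb are built internally from (fa, ca), (fb, cb)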
diff --git a/gtsam/sfm/TransferFactor.h b/gtsam/sfm/TransferFactor.h
index fb2dd63b0..d2085f57e 100644
--- a/gtsam/sfm/TransferFactor.h
+++ b/gtsam/sfm/TransferFactor.h
@@ -24,8 +24,8 @@ namespace gtsam {

/**
 * Binary factor in the context of Structure from Motion (SfM).
- * It is used to transfer points between different views based on the
- * fundamental matrices between these views. The factor computes the error
+ * It is used to transfer corresponding points from two views to a
+ * third based on two fundamental matrices. The factor computes the error
 * between the transferred points `pa` and `pb`, and the actual point `pc` in
 * the target view. Jacobians are done using numerical differentiation.
 */
diff --git a/gtsam/sfm/tests/testTransferFactor.cpp b/gtsam/sfm/tests/testTransferFactor.cpp
index 6092ec456..2dca12c2a 100644
--- a/gtsam/sfm/tests/testTransferFactor.cpp
+++ b/gtsam/sfm/tests/testTransferFactor.cpp
@@ -12,9 +12,6 @@

using namespace gtsam;

-double focalLength = 1000;
-Point2 principalPoint(640 / 2, 480 / 2);
-
//*************************************************************************
/// Generate three cameras on a circle, looking in
std::array<Pose3, 3> generateCameraPoses() {
@@ -44,17 +41,36 @@ TripleF<SimpleFundamentalMatrix> generateTripleF(
  return {F[0], F[1], F[2]};  // Return a TripleF instance
}

+//*************************************************************************
+namespace fixture {
+// Generate cameras on a circle
+std::array<Pose3, 3> cameraPoses = generateCameraPoses();
+auto triplet = generateTripleF(cameraPoses);
+double focalLength = 1000;
+Point2 principalPoint(640 / 2, 480 / 2);
+const Cal3_S2 K(focalLength, focalLength, 0.0,  //
+                principalPoint.x(), principalPoint.y());
+}  // namespace fixture
+
+//*************************************************************************
+// Test for getMatrices
+TEST(TransferFactor, GetMatrices) {
+  using namespace fixture;
+  TransferFactor<SimpleFundamentalMatrix> factor{{2, 0}, {1, 2}, {}};
+
+  // Check that getMatrices is correct
+  auto [Fki, Fkj] = factor.getMatrices(triplet.Fca, triplet.Fbc);
+  EXPECT(assert_equal(triplet.Fca.matrix(), Fki));
+  EXPECT(assert_equal(triplet.Fbc.matrix().transpose(), Fkj));
+}
+
//*************************************************************************
// Test for TransferFactor
TEST(TransferFactor, Jacobians) {
-  // Generate cameras on a circle
-  std::array<Pose3, 3> cameraPoses = generateCameraPoses();
-  auto triplet = generateTripleF(cameraPoses);
+  using namespace fixture;

  // Now project a point into the three cameras
  const Point3 P(0.1, 0.2, 0.3);
-  const Cal3_S2 K(focalLength, focalLength, 0.0,  //
-                  principalPoint.x(), principalPoint.y());

  std::array<Point2, 3> p;
  for (size_t i = 0; i < 3; ++i) {
@@ -70,11 +86,6 @@ TEST(TransferFactor, Jacobians) {
      factor1{key12, key01, p[2], p[0], p[1]},
      factor2{key20, key12, p[0], p[1], p[2]};

-  // Check that getMatrices is correct
-  auto [Fki, Fkj] = factor2.getMatrices(triplet.Fca, triplet.Fbc);
-  EXPECT(assert_equal(triplet.Fca.matrix(), Fki));
-  EXPECT(assert_equal(triplet.Fbc.matrix().transpose(), Fkj));
-
  // Create Values with edge keys
  Values values;
  values.insert(key01, triplet.Fab);
@@ -92,18 +103,13 @@ TEST(TransferFactor, Jacobians) {
//*************************************************************************
// Test for TransferFactor with multiple tuples
TEST(TransferFactor, MultipleTuples) {
-  // Generate cameras on a circle
-  std::array<Pose3, 3> cameraPoses = generateCameraPoses();
-  auto triplet = generateTripleF(cameraPoses);
+  using namespace fixture;

  // Now project multiple points into the three cameras
  const size_t numPoints = 5;  // Number of points to test
  std::vector<Point3> points3D;
  std::vector<std::array<Point2, 3>> projectedPoints;

-  const Cal3_S2 K(focalLength, focalLength, 0.0,  //
-                  principalPoint.x(), principalPoint.y());
-
  // Generate random 3D points and project them
  for (size_t n = 0; n < numPoints; ++n) {
    Point3 P(0.1 * n, 0.2 * n, 0.3 + 0.1 * n);
@@ -120,15 +126,15 @@ TEST(TransferFactor, MultipleTuples) {

  // Create a vector of tuples for the TransferFactor
  EdgeKey key01(0, 1), key12(1, 2), key20(2, 0);
-  std::vector<std::tuple<Point2, Point2, Point2>> tuples0, tuples1, tuples2;
+  std::vector<std::tuple<Point2, Point2, Point2>> tuples;

  for (size_t n = 0; n < numPoints; ++n) {
    const auto& p = projectedPoints[n];
-    tuples0.emplace_back(p[1], p[2], p[0]);
+    tuples.emplace_back(p[1], p[2], p[0]);
  }

  // Create TransferFactors using the new constructor
-  TransferFactor<SimpleFundamentalMatrix> factor{key01, key20, tuples0};
+  TransferFactor<SimpleFundamentalMatrix> factor{key01, key20, tuples};

  // Create Values with edge keys
  Values values;

From e7dea395622f2a0c7b267c711e0c2db7f1c0eabe Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sat, 26 Oct 2024 18:08:10 -0400
Subject: [PATCH 202/204] make function inline to avoid multiple definition error

---
 gtsam/nonlinear/internal/ChiSquaredInverse.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/nonlinear/internal/ChiSquaredInverse.h b/gtsam/nonlinear/internal/ChiSquaredInverse.h
index dbf83f92b..6707be1fe 100644
--- a/gtsam/nonlinear/internal/ChiSquaredInverse.h
+++ b/gtsam/nonlinear/internal/ChiSquaredInverse.h
@@ -36,7 +36,7 @@ namespace internal {
 * @param alpha Quantile value
 * @return double
 */
-double chi_squared_quantile(const double dofs, const double alpha) {
+inline double chi_squared_quantile(const double dofs, const double alpha) {
  return 2 * igami(dofs / 2, alpha);
}

From 0271eb637c69ac77d4b309765d7318fbaa6c3448 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sat, 26 Oct 2024 18:08:30 -0400
Subject: [PATCH 203/204] remove redundant assignment of boolean flag

---
 gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index bd106afbe..e3d90a591 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -146,7 +146,7 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
  double newError = system.error(newValues);

  while (true) {
-    const bool flag = (maxStep - newStep > newStep - minStep) ? true : false;
+    const bool flag = (maxStep - newStep > newStep - minStep);
    const double testStep = flag ? newStep + resphi * (maxStep - newStep)
                                 : newStep - resphi * (newStep - minStep);

From 92b3272823eaa74a3d8a08bde6d8a2dedb1543c9 Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Sat, 26 Oct 2024 18:08:43 -0400
Subject: [PATCH 204/204] make shared_ptr a reference

---
 gtsam/linear/PCGSolver.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h
index 42bb3fccc..17cc2d3db 100644
--- a/gtsam/linear/PCGSolver.h
+++ b/gtsam/linear/PCGSolver.h
@@ -42,7 +42,7 @@ struct GTSAM_EXPORT PCGSolverParameters : public ConjugateGradientParameters {
  PCGSolverParameters() {}

  PCGSolverParameters(
-      const std::shared_ptr<PreconditionerParameters> preconditioner)
+      const std::shared_ptr<PreconditionerParameters> &preconditioner)
      : preconditioner(preconditioner) {}

  void print(std::ostream &os) const override;
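For completeness, a usage sketch for this last change. Passing the shared_ptr by const reference avoids an atomic reference-count bump at the call site, while call syntax is unchanged. The concrete preconditioner type below is an assumption (GTSAM ships a DummyPreconditionerParameters in gtsam/linear/Preconditioner.h):

    #include <gtsam/linear/PCGSolver.h>
    #include <gtsam/linear/Preconditioner.h>

    using namespace gtsam;

    // The shared_ptr<Dummy...> upcasts to shared_ptr<PreconditionerParameters>
    // and binds to the constructor's const reference without an extra copy.
    auto preconditionerParams = std::make_shared<DummyPreconditionerParameters>();
    PCGSolverParameters pcgParams(preconditionerParams);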