From ecbf3d872ea467c94110b2f822d9e515e6757df6 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 21 Sep 2024 05:15:35 -0400 Subject: [PATCH 01/15] make logNormalizationConstant return -log(k) --- gtsam/inference/Conditional-inst.h | 2 +- gtsam/inference/Conditional.h | 12 +++++++----- gtsam/linear/GaussianBayesNet.cpp | 13 +++++++------ gtsam/linear/GaussianConditional.cpp | 16 ++++++++-------- gtsam/linear/GaussianConditional.h | 6 ++++-- gtsam/linear/NoiseModel.cpp | 10 +++++----- gtsam/linear/NoiseModel.h | 4 ++-- gtsam/linear/tests/testGaussianBayesNet.cpp | 4 ++-- gtsam/linear/tests/testGaussianConditional.cpp | 11 ++++++----- gtsam/linear/tests/testGaussianDensity.cpp | 2 +- gtsam/linear/tests/testNoiseModel.cpp | 12 ++++++------ 11 files changed, 49 insertions(+), 43 deletions(-) diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h index 9377e8cc4..b61a5ac22 100644 --- a/gtsam/inference/Conditional-inst.h +++ b/gtsam/inference/Conditional-inst.h @@ -89,7 +89,7 @@ bool Conditional::CheckInvariants( // normalization constant const double error = conditional.error(values); if (error < 0.0) return false; // prob_or_density is negative. - const double expected = conditional.logNormalizationConstant() - error; + const double expected = -(conditional.logNormalizationConstant() + error); if (std::abs(logProb - expected) > 1e-9) return false; // logProb is not consistent with error return true; diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index c76c05ab1..936a078a8 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -34,9 +34,10 @@ namespace gtsam { * `logProbability` is the main methods that need to be implemented in derived * classes. These two methods relate to the `error` method in the factor by: * probability(x) = k exp(-error(x)) - * where k is a normalization constant making \int probability(x) == 1.0, and - * logProbability(x) = K - error(x) - * i.e., K = log(K). The normalization constant K is assumed to *not* depend + * where k is a normalization constant making + * \int probability(x) = \int k exp(-error(x)) == 1.0, and + * logProbability(x) = -(K + error(x)) + * i.e., K = -log(k). The normalization constant k is assumed to *not* depend * on any argument, only (possibly) on the conditional parameters. * This class provides a default logNormalizationConstant() == 0.0. * @@ -163,8 +164,9 @@ namespace gtsam { } /** - * All conditional types need to implement a log normalization constant to - * make it such that error>=0. + * All conditional types need to implement a + * (negative) log normalization constant + * to make it such that error>=0. 
*/ virtual double logNormalizationConstant() const; diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index 861e19cc9..b7f296ea5 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -246,14 +246,15 @@ namespace gtsam { double GaussianBayesNet::logNormalizationConstant() const { /* normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) - logConstant = -0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + logConstant = -log(normalizationConstant) + = 0.5 * n*log(2*pi) + 0.5 * log(det(Sigma)) - log det(Sigma)) = -2.0 * logDeterminant() - thus, logConstant = -0.5*n*log(2*pi) + logDeterminant() + log(det(Sigma)) = -2.0 * logDeterminant() + thus, logConstant = 0.5*n*log(2*pi) - logDeterminant() - BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i()) - = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i()) - = sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant() + BayesNet logConstant = sum(0.5*n_i*log(2*pi) - logDeterminant_i()) + = sum(0.5*n_i*log(2*pi)) - sum(logDeterminant_i()) + = sum(0.5*n_i*log(2*pi)) - bn->logDeterminant() */ double logNormConst = 0.0; for (const sharedConditional& cg : *this) { diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index 585bbdb33..fc633d04d 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -181,24 +181,24 @@ namespace gtsam { /* ************************************************************************* */ // normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) - // log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + // neg-log = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) double GaussianConditional::logNormalizationConstant() const { constexpr double log2pi = 1.8378770664093454835606594728112; size_t n = d().size(); // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1} // log det(Sigma) = -log(det(R'R)) = -2*log(det(R)) // Hence, log det(Sigma)) = -2.0 * logDeterminant() - // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDeterminant()) - // = -0.5*n*log(2*pi) + (0.5*2.0 * logDeterminant()) - // = -0.5*n*log(2*pi) + logDeterminant() - return -0.5 * n * log2pi + logDeterminant(); + // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDeterminant()) + // = 0.5*n*log(2*pi) - (0.5*2.0 * logDeterminant()) + // = 0.5*n*log(2*pi) - logDeterminant() + return 0.5 * n * log2pi - logDeterminant(); } /* ************************************************************************* */ - // density = k exp(-error(x)) - // log = log(k) - error(x) + // density = 1/k exp(-error(x)) + // log = -log(k) - error(x) double GaussianConditional::logProbability(const VectorValues& x) const { - return logNormalizationConstant() - error(x); + return -logNormalizationConstant() - error(x); } double GaussianConditional::logProbability(const HybridValues& x) const { diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 420efabca..757687ed8 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -133,8 +133,10 @@ namespace gtsam { /// @{ /** - * normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) - * log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma) + * Return normalization constant in negative log space. 
+ * + * normalization constant k = 1.0 / sqrt((2*pi)^n*det(Sigma)) + * -log(k) = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) */ double logNormalizationConstant() const override; diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index de69fdce9..ea9ec1aec 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -257,13 +257,13 @@ double Gaussian::logDeterminant() const { /* *******************************************************************************/ double Gaussian::logNormalizationConstant() const { // log(det(Sigma)) = -2.0 * logDetR - // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDetR()) - // = -0.5*n*log(2*pi) + (0.5*2.0 * logDetR()) - // = -0.5*n*log(2*pi) + logDetR() + // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDetR()) + // = 0.5*n*log(2*pi) - (0.5*2.0 * logDetR()) + // = 0.5*n*log(2*pi) - logDetR() size_t n = dim(); constexpr double log2pi = 1.8378770664093454835606594728112; - // Get 1/log(\sqrt(|2pi Sigma|)) = -0.5*log(|2pi Sigma|) - return -0.5 * n * log2pi + logDetR(); + // Get -log(1/\sqrt(|2pi Sigma|)) = 0.5*log(|2pi Sigma|) + return 0.5 * n * log2pi - logDetR(); } diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 91c11d410..31ffd1107 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -274,8 +274,8 @@ namespace gtsam { /** * @brief Method to compute the normalization constant * for a Gaussian noise model k = \sqrt(1/|2πΣ|). - * We compute this in the log-space for numerical accuracy, - * thus returning log(k). + * We compute this in the negative log-space for numerical accuracy, + * thus returning -log(k). * * @return double */ diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 99453ee4e..a186eb2b2 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -76,11 +76,11 @@ TEST(GaussianBayesNet, Evaluate1) { // the normalization constant 1.0/sqrt((2*pi*Sigma).det()). 
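  // (With two 1D conditionals, k = 1/sqrt((2*pi)^2 det(Sigma)),
  // i.e. sqrt(det(invSigma))/(2*pi), which the next line computes.)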
// The covariance matrix inv(Sigma) = R'*R, so the determinant is const double constant = sqrt((invSigma / (2 * M_PI)).determinant()); - EXPECT_DOUBLES_EQUAL(log(constant), + EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.at(0)->logNormalizationConstant() + smallBayesNet.at(1)->logNormalizationConstant(), 1e-9); - EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(), + EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.logNormalizationConstant(), 1e-9); const double actual = smallBayesNet.evaluate(mean); EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index dcd821889..b03e0a060 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -492,9 +492,10 @@ TEST(GaussianConditional, LogNormalizationConstant) { VectorValues x; x.insert(X(0), Vector3::Zero()); Matrix3 Sigma = I_3x3 * sigma * sigma; - double expectedLogNormalizingConstant = log(1 / sqrt((2 * M_PI * Sigma).determinant())); + double expectedLogNormalizationConstant = + -log(1 / sqrt((2 * M_PI * Sigma).determinant())); - EXPECT_DOUBLES_EQUAL(expectedLogNormalizingConstant, + EXPECT_DOUBLES_EQUAL(expectedLogNormalizationConstant, conditional.logNormalizationConstant(), 1e-9); } @@ -516,7 +517,7 @@ TEST(GaussianConditional, Print) { " d = [ 20 40 ]\n" " mean: 1 elements\n" " x0: 20 40\n" - " logNormalizationConstant: -4.0351\n" + " logNormalizationConstant: 4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected, conditional, "GaussianConditional")); @@ -531,7 +532,7 @@ TEST(GaussianConditional, Print) { " S[x1] = [ -1 -2 ]\n" " [ -3 -4 ]\n" " d = [ 20 40 ]\n" - " logNormalizationConstant: -4.0351\n" + " logNormalizationConstant: 4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional")); @@ -547,7 +548,7 @@ TEST(GaussianConditional, Print) { " S[y1] = [ -5 -6 ]\n" " [ -7 -8 ]\n" " d = [ 20 40 ]\n" - " logNormalizationConstant: -4.0351\n" + " logNormalizationConstant: 4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional")); } diff --git a/gtsam/linear/tests/testGaussianDensity.cpp b/gtsam/linear/tests/testGaussianDensity.cpp index 97eb0de70..3226f40ab 100644 --- a/gtsam/linear/tests/testGaussianDensity.cpp +++ b/gtsam/linear/tests/testGaussianDensity.cpp @@ -55,7 +55,7 @@ TEST(GaussianDensity, FromMeanAndStddev) { double expected1 = 0.5 * e.dot(e); EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9); - double expected2 = density.logNormalizationConstant()- 0.5 * e.dot(e); + double expected2 = -(density.logNormalizationConstant() + 0.5 * e.dot(e)); EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9); } diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index 6aea154ee..59ee05d07 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -810,9 +810,9 @@ TEST(NoiseModel, NonDiagonalGaussian) TEST(NoiseModel, LogNormalizationConstant1D) { // Very simple 1D noise model, which we can compute by hand. double sigma = 0.1; - // For expected values, we compute 1/log(sqrt(|2πΣ|)) by hand. - // = -0.5*(log(2π) + log(Σ)) (since it is 1D) - double expected_value = -0.5 * log(2 * M_PI * sigma * sigma); + // For expected values, we compute -log(1/sqrt(|2πΣ|)) by hand. 
+ // = 0.5*(log(2π) + log(Σ)) (since it is 1D) + double expected_value = 0.5 * log(2 * M_PI * sigma * sigma); // Gaussian { @@ -839,7 +839,7 @@ TEST(NoiseModel, LogNormalizationConstant1D) { auto noise_model = Unit::Create(1); double actual_value = noise_model->logNormalizationConstant(); double sigma = 1.0; - expected_value = -0.5 * log(2 * M_PI * sigma * sigma); + expected_value = 0.5 * log(2 * M_PI * sigma * sigma); EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); } } @@ -850,7 +850,7 @@ TEST(NoiseModel, LogNormalizationConstant3D) { size_t n = 3; // We compute the expected values just like in the LogNormalizationConstant1D // test, but we multiply by 3 due to the determinant. - double expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma); + double expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma); // Gaussian { @@ -879,7 +879,7 @@ TEST(NoiseModel, LogNormalizationConstant3D) { auto noise_model = Unit::Create(3); double actual_value = noise_model->logNormalizationConstant(); double sigma = 1.0; - expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma); + expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma); EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); } } From ceb9496e7c6456a16e7fa812bff944009cfbe88d Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 21 Sep 2024 05:16:14 -0400 Subject: [PATCH 02/15] update hybrid code to use -log(k) consistently --- gtsam/hybrid/HybridGaussianConditional.cpp | 12 ++++---- gtsam/hybrid/HybridGaussianConditional.h | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 11 +++---- .../tests/testHybridGaussianConditional.cpp | 15 +++++----- .../hybrid/tests/testHybridGaussianFactor.cpp | 4 +-- .../tests/testHybridNonlinearFactorGraph.cpp | 30 +++++++++---------- 6 files changed, 38 insertions(+), 36 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index fb943366c..4ee5241a3 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -36,7 +36,7 @@ HybridGaussianFactor::FactorValuePairs GetFactorValuePairs( // Check if conditional is pruned if (conditional) { // Assign log(\sqrt(|2πΣ|)) = -log(1 / sqrt(|2πΣ|)) - value = -conditional->logNormalizationConstant(); + value = conditional->logNormalizationConstant(); } return {std::dynamic_pointer_cast(conditional), value}; }; @@ -51,14 +51,14 @@ HybridGaussianConditional::HybridGaussianConditional( discreteParents, GetFactorValuePairs(conditionals)), BaseConditional(continuousFrontals.size()), conditionals_(conditionals) { - // Calculate logConstant_ as the minimum of the log normalizers of the - // conditionals, by visiting the decision tree: + // Calculate logConstant_ as the minimum of the negative-log normalizers of + // the conditionals, by visiting the decision tree: logConstant_ = std::numeric_limits::infinity(); conditionals_.visit( [this](const GaussianConditional::shared_ptr &conditional) { if (conditional) { this->logConstant_ = std::min( - this->logConstant_, -conditional->logNormalizationConstant()); + this->logConstant_, conditional->logNormalizationConstant()); } }); } @@ -85,7 +85,7 @@ GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree() // First check if conditional has not been pruned if (gc) { const double Cgm_Kgcm = - -this->logConstant_ - gc->logNormalizationConstant(); + gc->logNormalizationConstant() - this->logConstant_; // If there is a difference in the covariances, we need to account for // that since the error 
is dependent on the mode. if (Cgm_Kgcm > 0.0) { @@ -216,7 +216,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); const double Cgm_Kgcm = - -logConstant_ - conditional->logNormalizationConstant(); + conditional->logNormalizationConstant() - logConstant_; if (Cgm_Kgcm == 0.0) { return {likelihood_m, 0.0}; } else { diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 4a5fdcc89..676c45f61 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -152,7 +152,7 @@ class GTSAM_EXPORT HybridGaussianConditional /// The log normalization constant is max of the the individual /// log-normalization constants. - double logNormalizationConstant() const override { return -logConstant_; } + double logNormalizationConstant() const override { return logConstant_; } /** * Create a likelihood factor for a hybrid Gaussian conditional, diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index a6fe955eb..ff9ca7a55 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -329,9 +329,9 @@ static std::shared_ptr createDiscreteFactor( // Logspace version of: // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); - // We take negative of the logNormalizationConstant `log(k)` - // to get `log(1/k) = log(\sqrt{|2πΣ|})`. - return -factor->error(kEmpty) - conditional->logNormalizationConstant(); + // logNormalizationConstant gives `-log(k)` + // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. + return -factor->error(kEmpty) + conditional->logNormalizationConstant(); }; AlgebraicDecisionTree logProbabilities( @@ -355,8 +355,9 @@ static std::shared_ptr createHybridGaussianFactor( auto hf = std::dynamic_pointer_cast(factor); if (!hf) throw std::runtime_error("Expected HessianFactor!"); // Add 2.0 term since the constant term will be premultiplied by 0.5 - // as per the Hessian definition - hf->constantTerm() += 2.0 * conditional->logNormalizationConstant(); + // as per the Hessian definition, + // and negative since we want log(k) + hf->constantTerm() += -2.0 * conditional->logNormalizationConstant(); } return {factor, 0.0}; }; diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp index dde493685..040cd2ff0 100644 --- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -180,12 +180,13 @@ TEST(HybridGaussianConditional, Error2) { // Check result. DiscreteKeys discrete_keys{mode}; - double logNormalizer0 = -conditionals[0]->logNormalizationConstant(); - double logNormalizer1 = -conditionals[1]->logNormalizationConstant(); + double logNormalizer0 = conditionals[0]->logNormalizationConstant(); + double logNormalizer1 = conditionals[1]->logNormalizationConstant(); double minLogNormalizer = std::min(logNormalizer0, logNormalizer1); - // Expected error is e(X) + log(|2πΣ|). - // We normalize log(|2πΣ|) with min(logNormalizers) so it is non-negative. + // Expected error is e(X) + log(sqrt(|2πΣ|)). + // We normalize log(sqrt(|2πΣ|)) with min(logNormalizers) + // so it is non-negative. 
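+  // (Spelled out: with K_m = -log(k_m) = 0.5*n*log(2π) + 0.5*log|Σ_m|,
+  // each leaf below is e_m(x) + K_m - min_m K_m, which is >= 0.)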
std::vector leaves = { conditionals[0]->error(vv) + logNormalizer0 - minLogNormalizer, conditionals[1]->error(vv) + logNormalizer1 - minLogNormalizer}; @@ -196,7 +197,7 @@ TEST(HybridGaussianConditional, Error2) { // Check for non-tree version. for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; - EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) - + EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) + conditionals[mode]->logNormalizationConstant() - minLogNormalizer, hybrid_conditional.error(hv), 1e-8); @@ -230,8 +231,8 @@ TEST(HybridGaussianConditional, Likelihood2) { CHECK(jf1->rows() == 2); // Check that the constant C1 is properly encoded in the JacobianFactor. - const double C1 = hybrid_conditional.logNormalizationConstant() - - conditionals[1]->logNormalizationConstant(); + const double C1 = conditionals[1]->logNormalizationConstant() - + hybrid_conditional.logNormalizationConstant(); const double c1 = std::sqrt(2.0 * C1); Vector expected_unwhitened(2); expected_unwhitened << 4.9 - 5.0, -c1; diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index bfa283983..cd5f9bf07 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -773,8 +773,8 @@ static HybridGaussianFactorGraph CreateFactorGraph( // We take negative since we want // the underlying scalar to be log(\sqrt(|2πΣ|)) std::vector factors{ - {f0, -model0->logNormalizationConstant()}, - {f1, -model1->logNormalizationConstant()}}; + {f0, model0->logNormalizationConstant()}, + {f1, model1->logNormalizationConstant()}}; HybridGaussianFactor motionFactor({X(0), X(1)}, m1, factors); HybridGaussianFactorGraph hfg; diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 347cc5f1f..cd9b24b37 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -714,26 +714,26 @@ factor 6: P( m1 | m0 ): size: 3 conditional 0: Hybrid P( x0 | x1 m0) Discrete Keys = (m0, 2), - logNormalizationConstant: 1.38862 + logNormalizationConstant: -1.38862 Choice(m0) 0 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.85087 ] - logNormalizationConstant: 1.38862 + logNormalizationConstant: -1.38862 No noise model 1 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.95037 ] - logNormalizationConstant: 1.38862 + logNormalizationConstant: -1.38862 No noise model conditional 1: Hybrid P( x1 | x2 m0 m1) Discrete Keys = (m0, 2), (m1, 2), - logNormalizationConstant: 1.3935 + logNormalizationConstant: -1.3935 Choice(m1) 0 Choice(m0) @@ -741,14 +741,14 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.99901 ] - logNormalizationConstant: 1.3935 + logNormalizationConstant: -1.3935 No noise model 0 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.90098 ] - logNormalizationConstant: 1.3935 + logNormalizationConstant: -1.3935 No noise model 1 Choice(m0) @@ -756,19 +756,19 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10.098 ] - logNormalizationConstant: 1.3935 + logNormalizationConstant: -1.3935 No noise model 1 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10 ] - logNormalizationConstant: 1.3935 + logNormalizationConstant: -1.3935 No noise model conditional 2: Hybrid P( x2 | m0 m1) Discrete Keys = (m0, 2), (m1, 2), - logNormalizationConstant: 1.38857 + 
logNormalizationConstant: -1.38857 Choice(m1) 0 Choice(m0) @@ -777,7 +777,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1489 ] mean: 1 elements x2: -1.0099 - logNormalizationConstant: 1.38857 + logNormalizationConstant: -1.38857 No noise model 0 1 Leaf p(x2) @@ -785,7 +785,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1479 ] mean: 1 elements x2: -1.0098 - logNormalizationConstant: 1.38857 + logNormalizationConstant: -1.38857 No noise model 1 Choice(m0) @@ -794,7 +794,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0504 ] mean: 1 elements x2: -1.0001 - logNormalizationConstant: 1.38857 + logNormalizationConstant: -1.38857 No noise model 1 1 Leaf p(x2) @@ -802,7 +802,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0494 ] mean: 1 elements x2: -1 - logNormalizationConstant: 1.38857 + logNormalizationConstant: -1.38857 No noise model )"; @@ -903,8 +903,8 @@ static HybridNonlinearFactorGraph CreateFactorGraph( // We take negative since we want // the underlying scalar to be log(\sqrt(|2πΣ|)) std::vector factors{ - {f0, -model0->logNormalizationConstant()}, - {f1, -model1->logNormalizationConstant()}}; + {f0, model0->logNormalizationConstant()}, + {f1, model1->logNormalizationConstant()}}; HybridNonlinearFactor mixtureFactor({X(0), X(1)}, m1, factors); From d3971b93c82433c7de8bfbc08f9e338733755dcc Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sat, 21 Sep 2024 06:14:36 -0400 Subject: [PATCH 03/15] fix python tests --- python/gtsam/tests/test_HybridBayesNet.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py index 2f7dd4a98..bb12ff02b 100644 --- a/python/gtsam/tests/test_HybridBayesNet.py +++ b/python/gtsam/tests/test_HybridBayesNet.py @@ -90,8 +90,8 @@ class TestHybridBayesNet(GtsamTestCase): self.assertTrue(probability >= 0.0) logProb = conditional.logProbability(values) self.assertAlmostEqual(probability, np.exp(logProb)) - expected = conditional.logNormalizationConstant() - \ - conditional.error(values) + expected = -(conditional.logNormalizationConstant() + \ + conditional.error(values)) self.assertAlmostEqual(logProb, expected) From 2d2213e8806afad763c2ee620531b6a9c25950f3 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 22 Sep 2024 22:11:23 -0400 Subject: [PATCH 04/15] add errorConstant method and use it for logNormalizationConstant in Conditional --- gtsam/discrete/DiscreteConditional.cpp | 6 ++++++ gtsam/discrete/DiscreteConditional.h | 5 +++-- gtsam/hybrid/HybridConditional.cpp | 10 +++++----- gtsam/hybrid/HybridConditional.h | 5 +++-- gtsam/hybrid/HybridGaussianConditional.cpp | 12 +++++------- gtsam/hybrid/HybridGaussianConditional.h | 12 +++++++++--- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 6 +++--- gtsam/inference/Conditional-inst.h | 13 ++++++++++--- gtsam/inference/Conditional.h | 13 ++++++++++--- gtsam/linear/GaussianBayesNet.cpp | 15 ++++++++------- gtsam/linear/GaussianConditional.cpp | 8 ++++---- gtsam/linear/GaussianConditional.h | 6 ++++-- gtsam/linear/NoiseModel.cpp | 6 +++++- gtsam/linear/NoiseModel.h | 10 ++++++++-- gtsam/linear/linear.i | 1 + 15 files changed, 84 insertions(+), 44 deletions(-) diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index 9cf19638c..3e74cebb4 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -465,6 +465,12 @@ string DiscreteConditional::html(const KeyFormatter& keyFormatter, double 
DiscreteConditional::evaluate(const HybridValues& x) const { return this->evaluate(x.discrete()); } + +/* ************************************************************************* */ +double DiscreteConditional::errorConstant() const { + return 0.0; +} + /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h index 4c7af1489..53331a0be 100644 --- a/gtsam/discrete/DiscreteConditional.h +++ b/gtsam/discrete/DiscreteConditional.h @@ -264,11 +264,12 @@ class GTSAM_EXPORT DiscreteConditional } /** - * logNormalizationConstant K is just zero, such that + * errorConstant is just zero, such that * logProbability(x) = log(evaluate(x)) = - error(x) * and hence error(x) = - log(evaluate(x)) > 0 for all x. + * Thus -log(K) for the normalization constant k is 0. */ - double logNormalizationConstant() const override { return 0.0; } + double errorConstant() const override; /// @} diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp index 3cb3bba65..57d8bba39 100644 --- a/gtsam/hybrid/HybridConditional.cpp +++ b/gtsam/hybrid/HybridConditional.cpp @@ -161,18 +161,18 @@ double HybridConditional::logProbability(const HybridValues &values) const { } /* ************************************************************************ */ -double HybridConditional::logNormalizationConstant() const { +double HybridConditional::errorConstant() const { if (auto gc = asGaussian()) { - return gc->logNormalizationConstant(); + return gc->errorConstant(); } if (auto gm = asHybrid()) { - return gm->logNormalizationConstant(); // 0.0! + return gm->errorConstant(); // 0.0! } if (auto dc = asDiscrete()) { - return dc->logNormalizationConstant(); // 0.0! + return dc->errorConstant(); // 0.0! } throw std::runtime_error( - "HybridConditional::logProbability: conditional type not handled"); + "HybridConditional::errorConstant: conditional type not handled"); } /* ************************************************************************ */ diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h index 51eeeb5bb..92118385c 100644 --- a/gtsam/hybrid/HybridConditional.h +++ b/gtsam/hybrid/HybridConditional.h @@ -193,11 +193,12 @@ class GTSAM_EXPORT HybridConditional double logProbability(const HybridValues& values) const override; /** - * Return the log normalization constant. + * Return the negative log of the normalization constant. + * This shows up in the error as -(error(x) + errorConstant) * Note this is 0.0 for discrete and hybrid conditionals, but depends * on the continuous parameters for Gaussian conditionals. */ - double logNormalizationConstant() const override; + double errorConstant() const override; /// Return the probability (or density) of the underlying conditional. 
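  /// Equal to exp(logProbability(values)).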
double evaluate(const HybridValues& values) const override; diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index 4ee5241a3..f6cd3e5f7 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -36,7 +36,7 @@ HybridGaussianFactor::FactorValuePairs GetFactorValuePairs( // Check if conditional is pruned if (conditional) { // Assign log(\sqrt(|2πΣ|)) = -log(1 / sqrt(|2πΣ|)) - value = conditional->logNormalizationConstant(); + value = conditional->errorConstant(); } return {std::dynamic_pointer_cast(conditional), value}; }; @@ -57,8 +57,8 @@ HybridGaussianConditional::HybridGaussianConditional( conditionals_.visit( [this](const GaussianConditional::shared_ptr &conditional) { if (conditional) { - this->logConstant_ = std::min( - this->logConstant_, conditional->logNormalizationConstant()); + this->logConstant_ = + std::min(this->logConstant_, conditional->errorConstant()); } }); } @@ -84,8 +84,7 @@ GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree() auto wrap = [this](const GaussianConditional::shared_ptr &gc) { // First check if conditional has not been pruned if (gc) { - const double Cgm_Kgcm = - gc->logNormalizationConstant() - this->logConstant_; + const double Cgm_Kgcm = gc->errorConstant() - this->logConstant_; // If there is a difference in the covariances, we need to account for // that since the error is dependent on the mode. if (Cgm_Kgcm > 0.0) { @@ -215,8 +214,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( [&](const GaussianConditional::shared_ptr &conditional) -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); - const double Cgm_Kgcm = - conditional->logNormalizationConstant() - logConstant_; + const double Cgm_Kgcm = conditional->errorConstant() - logConstant_; if (Cgm_Kgcm == 0.0) { return {likelihood_m, 0.0}; } else { diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 676c45f61..8851c7741 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -150,9 +150,15 @@ class GTSAM_EXPORT HybridGaussianConditional /// Returns the continuous keys among the parents. KeyVector continuousParents() const; - /// The log normalization constant is max of the the individual - /// log-normalization constants. - double logNormalizationConstant() const override { return logConstant_; } + /** + * @brief Return log normalization constant in negative log space. + * + * The log normalization constant is the max of the individual + * log-normalization constants. + * + * @return double + */ + inline double errorConstant() const override { return logConstant_; } /** * Create a likelihood factor for a hybrid Gaussian conditional, diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index ff9ca7a55..a6c15d1fd 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -329,9 +329,9 @@ static std::shared_ptr createDiscreteFactor( // Logspace version of: // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); - // logNormalizationConstant gives `-log(k)` + // errorConstant gives `-log(k)` // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. 
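  // Hence the value returned below is -error(kEmpty) - log(k),
  // i.e. -error(kEmpty) plus what errorConstant() returns.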
- return -factor->error(kEmpty) + conditional->logNormalizationConstant(); + return -factor->error(kEmpty) + conditional->errorConstant(); }; AlgebraicDecisionTree logProbabilities( @@ -357,7 +357,7 @@ static std::shared_ptr createHybridGaussianFactor( // Add 2.0 term since the constant term will be premultiplied by 0.5 // as per the Hessian definition, // and negative since we want log(k) - hf->constantTerm() += -2.0 * conditional->logNormalizationConstant(); + hf->constantTerm() += -2.0 * conditional->errorConstant(); } return {factor, 0.0}; }; diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h index b61a5ac22..ba42a9a24 100644 --- a/gtsam/inference/Conditional-inst.h +++ b/gtsam/inference/Conditional-inst.h @@ -59,10 +59,17 @@ double Conditional::evaluate( /* ************************************************************************* */ template -double Conditional::logNormalizationConstant() +double Conditional::errorConstant() const { throw std::runtime_error( - "Conditional::logNormalizationConstant is not implemented"); + "Conditional::errorConstant is not implemented"); +} + +/* ************************************************************************* */ +template +double Conditional::logNormalizationConstant() + const { + return -errorConstant(); } /* ************************************************************************* */ @@ -89,7 +96,7 @@ bool Conditional::CheckInvariants( // normalization constant const double error = conditional.error(values); if (error < 0.0) return false; // prob_or_density is negative. - const double expected = -(conditional.logNormalizationConstant() + error); + const double expected = -(conditional.errorConstant() + error); if (std::abs(logProb - expected) > 1e-9) return false; // logProb is not consistent with error return true; diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index 936a078a8..d09341489 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -164,9 +164,16 @@ namespace gtsam { } /** - * All conditional types need to implement a - * (negative) log normalization constant - * to make it such that error>=0. + * @brief All conditional types need to implement this as the negative log + * of the normalization constant. + * + * @return double + */ + virtual double errorConstant() const; + + /** + * All conditional types need to implement a log normalization constant to + * make it such that error>=0. 
*/ virtual double logNormalizationConstant() const; diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp index b7f296ea5..978475bcb 100644 --- a/gtsam/linear/GaussianBayesNet.cpp +++ b/gtsam/linear/GaussianBayesNet.cpp @@ -247,14 +247,15 @@ namespace gtsam { /* normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) logConstant = -log(normalizationConstant) - = 0.5 * n*log(2*pi) + 0.5 * log(det(Sigma)) - - log(det(Sigma)) = -2.0 * logDeterminant() - thus, logConstant = 0.5*n*log(2*pi) - logDeterminant() + = -0.5 * n*log(2*pi) - 0.5 * log(det(Sigma)) - BayesNet logConstant = sum(0.5*n_i*log(2*pi) - logDeterminant_i()) - = sum(0.5*n_i*log(2*pi)) - sum(logDeterminant_i()) - = sum(0.5*n_i*log(2*pi)) - bn->logDeterminant() + log(det(Sigma)) = -2.0 * logDeterminant() + thus, logConstant = -0.5*n*log(2*pi) + logDeterminant() + + BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i()) + = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i()) + = sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant() + = sum(logNormalizationConstant_i) */ double logNormConst = 0.0; for (const sharedConditional& cg : *this) { diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index fc633d04d..ca4a1ed33 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -182,7 +182,7 @@ namespace gtsam { /* ************************************************************************* */ // normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) // neg-log = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) - double GaussianConditional::logNormalizationConstant() const { + double GaussianConditional::errorConstant() const { constexpr double log2pi = 1.8378770664093454835606594728112; size_t n = d().size(); // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1} @@ -195,10 +195,10 @@ namespace gtsam { } /* ************************************************************************* */ - // density = 1/k exp(-error(x)) - // log = -log(k) - error(x) + // density = k exp(-error(x)) + // log = log(k) - error(x) double GaussianConditional::logProbability(const VectorValues& x) const { - return -logNormalizationConstant() - error(x); + return logNormalizationConstant() - error(x); } double GaussianConditional::logProbability(const HybridValues& x) const { diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 757687ed8..31f3797d5 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -133,12 +133,14 @@ namespace gtsam { /// @{ /** - * Return normalization constant in negative log space. + * @brief Return the negative log of the normalization constant. 
* * normalization constant k = 1.0 / sqrt((2*pi)^n*det(Sigma)) * -log(k) = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) + * + * @return double */ - double logNormalizationConstant() const override; + double errorConstant() const override; /** * Calculate log-probability log(evaluate(x)) for given values `x`: diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index ea9ec1aec..39901584b 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -255,7 +255,7 @@ double Gaussian::logDeterminant() const { } /* *******************************************************************************/ -double Gaussian::logNormalizationConstant() const { +double Gaussian::errorConstant() const { // log(det(Sigma)) = -2.0 * logDetR // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDetR()) // = 0.5*n*log(2*pi) - (0.5*2.0 * logDetR()) @@ -266,6 +266,10 @@ double Gaussian::logNormalizationConstant() const { return 0.5 * n * log2pi - logDetR(); } +/* *******************************************************************************/ +double Gaussian::logNormalizationConstant() const { + return -errorConstant(); +} /* ************************************************************************* */ // Diagonal diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 31ffd1107..851dc3c1f 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -271,11 +271,17 @@ namespace gtsam { /// Compute the log of |Σ| double logDeterminant() const; + /** + * @brief Compute the negative log of the normalization constant + * for a Gaussian noise model k = \sqrt(1/|2πΣ|). + * + * @return double + */ + double errorConstant() const; + /** * @brief Method to compute the normalization constant * for a Gaussian noise model k = \sqrt(1/|2πΣ|). - * We compute this in the negative log-space for numerical accuracy, - * thus returning -log(k). * * @return double */ diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index 9c448de50..71eb94dd5 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -548,6 +548,7 @@ virtual class GaussianConditional : gtsam::JacobianFactor { bool equals(const gtsam::GaussianConditional& cg, double tol) const; // Standard Interface + double errorConstant() const; double logNormalizationConstant() const; double logProbability(const gtsam::VectorValues& x) const; double evaluate(const gtsam::VectorValues& x) const; From 6488a0ceecfc18dad6d494e8589a507c92a72419 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 22 Sep 2024 22:16:59 -0400 Subject: [PATCH 05/15] updated tests --- .../tests/testHybridGaussianConditional.cpp | 20 ++++++------ .../hybrid/tests/testHybridGaussianFactor.cpp | 5 ++- .../tests/testHybridNonlinearFactorGraph.cpp | 31 +++++++++---------- gtsam/linear/tests/testGaussianBayesNet.cpp | 4 +-- .../linear/tests/testGaussianConditional.cpp | 8 ++--- gtsam/linear/tests/testGaussianDensity.cpp | 2 +- gtsam/linear/tests/testNoiseModel.cpp | 12 +++---- python/gtsam/tests/test_HybridBayesNet.py | 3 +- 8 files changed, 41 insertions(+), 44 deletions(-) diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp index 040cd2ff0..6c1037e1d 100644 --- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -180,16 +180,16 @@ TEST(HybridGaussianConditional, Error2) { // Check result. 
DiscreteKeys discrete_keys{mode}; - double logNormalizer0 = conditionals[0]->logNormalizationConstant(); - double logNormalizer1 = conditionals[1]->logNormalizationConstant(); - double minLogNormalizer = std::min(logNormalizer0, logNormalizer1); + double errorConstant0 = conditionals[0]->errorConstant(); + double errorConstant1 = conditionals[1]->errorConstant(); + double minErrorConstant = std::min(errorConstant0, errorConstant1); // Expected error is e(X) + log(sqrt(|2πΣ|)). - // We normalize log(sqrt(|2πΣ|)) with min(logNormalizers) + // We normalize log(sqrt(|2πΣ|)) with min(errorConstant) // so it is non-negative. std::vector leaves = { - conditionals[0]->error(vv) + logNormalizer0 - minLogNormalizer, - conditionals[1]->error(vv) + logNormalizer1 - minLogNormalizer}; + conditionals[0]->error(vv) + errorConstant0 - minErrorConstant, + conditionals[1]->error(vv) + errorConstant1 - minErrorConstant}; AlgebraicDecisionTree expected(discrete_keys, leaves); EXPECT(assert_equal(expected, actual, 1e-6)); @@ -198,8 +198,8 @@ TEST(HybridGaussianConditional, Error2) { for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) + - conditionals[mode]->logNormalizationConstant() - - minLogNormalizer, + conditionals[mode]->errorConstant() - + minErrorConstant, hybrid_conditional.error(hv), 1e-8); } } @@ -231,8 +231,8 @@ TEST(HybridGaussianConditional, Likelihood2) { CHECK(jf1->rows() == 2); // Check that the constant C1 is properly encoded in the JacobianFactor. - const double C1 = conditionals[1]->logNormalizationConstant() - - hybrid_conditional.logNormalizationConstant(); + const double C1 = hybrid_conditional.logNormalizationConstant() - + conditionals[1]->logNormalizationConstant(); const double c1 = std::sqrt(2.0 * C1); Vector expected_unwhitened(2); expected_unwhitened << 4.9 - 5.0, -c1; diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index 03130bc10..d84948c75 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -780,9 +780,8 @@ static HybridGaussianFactorGraph CreateFactorGraph( // Create HybridGaussianFactor // We take negative since we want // the underlying scalar to be log(\sqrt(|2πΣ|)) - std::vector factors{ - {f0, model0->logNormalizationConstant()}, - {f1, model1->logNormalizationConstant()}}; + std::vector factors{{f0, model0->errorConstant()}, + {f1, model1->errorConstant()}}; HybridGaussianFactor motionFactor({X(0), X(1)}, m1, factors); HybridGaussianFactorGraph hfg; diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index cd9b24b37..5f1108ace 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -714,26 +714,26 @@ factor 6: P( m1 | m0 ): size: 3 conditional 0: Hybrid P( x0 | x1 m0) Discrete Keys = (m0, 2), - logNormalizationConstant: -1.38862 + logNormalizationConstant: 1.38862 Choice(m0) 0 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.85087 ] - logNormalizationConstant: -1.38862 + logNormalizationConstant: 1.38862 No noise model 1 Leaf p(x0 | x1) R = [ 10.0499 ] S[x1] = [ -0.0995037 ] d = [ -9.95037 ] - logNormalizationConstant: -1.38862 + logNormalizationConstant: 1.38862 No noise model conditional 1: Hybrid P( x1 | x2 m0 m1) Discrete Keys = (m0, 2), (m1, 2), - logNormalizationConstant: -1.3935 + logNormalizationConstant: 
1.3935 Choice(m1) 0 Choice(m0) @@ -741,14 +741,14 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.99901 ] - logNormalizationConstant: -1.3935 + logNormalizationConstant: 1.3935 No noise model 0 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -9.90098 ] - logNormalizationConstant: -1.3935 + logNormalizationConstant: 1.3935 No noise model 1 Choice(m0) @@ -756,19 +756,19 @@ conditional 1: Hybrid P( x1 | x2 m0 m1) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10.098 ] - logNormalizationConstant: -1.3935 + logNormalizationConstant: 1.3935 No noise model 1 1 Leaf p(x1 | x2) R = [ 10.099 ] S[x2] = [ -0.0990196 ] d = [ -10 ] - logNormalizationConstant: -1.3935 + logNormalizationConstant: 1.3935 No noise model conditional 2: Hybrid P( x2 | m0 m1) Discrete Keys = (m0, 2), (m1, 2), - logNormalizationConstant: -1.38857 + logNormalizationConstant: 1.38857 Choice(m1) 0 Choice(m0) @@ -777,7 +777,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1489 ] mean: 1 elements x2: -1.0099 - logNormalizationConstant: -1.38857 + logNormalizationConstant: 1.38857 No noise model 0 1 Leaf p(x2) @@ -785,7 +785,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.1479 ] mean: 1 elements x2: -1.0098 - logNormalizationConstant: -1.38857 + logNormalizationConstant: 1.38857 No noise model 1 Choice(m0) @@ -794,7 +794,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0504 ] mean: 1 elements x2: -1.0001 - logNormalizationConstant: -1.38857 + logNormalizationConstant: 1.38857 No noise model 1 1 Leaf p(x2) @@ -802,7 +802,7 @@ conditional 2: Hybrid P( x2 | m0 m1) d = [ -10.0494 ] mean: 1 elements x2: -1 - logNormalizationConstant: -1.38857 + logNormalizationConstant: 1.38857 No noise model )"; @@ -902,9 +902,8 @@ static HybridNonlinearFactorGraph CreateFactorGraph( // Create HybridNonlinearFactor // We take negative since we want // the underlying scalar to be log(\sqrt(|2πΣ|)) - std::vector factors{ - {f0, model0->logNormalizationConstant()}, - {f1, model1->logNormalizationConstant()}}; + std::vector factors{{f0, model0->errorConstant()}, + {f1, model1->errorConstant()}}; HybridNonlinearFactor mixtureFactor({X(0), X(1)}, m1, factors); diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index a186eb2b2..99453ee4e 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -76,11 +76,11 @@ TEST(GaussianBayesNet, Evaluate1) { // the normalization constant 1.0/sqrt((2*pi*Sigma).det()). 
// The covariance matrix inv(Sigma) = R'*R, so the determinant is const double constant = sqrt((invSigma / (2 * M_PI)).determinant()); - EXPECT_DOUBLES_EQUAL(-log(constant), + EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.at(0)->logNormalizationConstant() + smallBayesNet.at(1)->logNormalizationConstant(), 1e-9); - EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.logNormalizationConstant(), + EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(), 1e-9); const double actual = smallBayesNet.evaluate(mean); EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index b03e0a060..26086104c 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -493,7 +493,7 @@ TEST(GaussianConditional, LogNormalizationConstant) { x.insert(X(0), Vector3::Zero()); Matrix3 Sigma = I_3x3 * sigma * sigma; double expectedLogNormalizationConstant = - -log(1 / sqrt((2 * M_PI * Sigma).determinant())); + log(1 / sqrt((2 * M_PI * Sigma).determinant())); EXPECT_DOUBLES_EQUAL(expectedLogNormalizationConstant, conditional.logNormalizationConstant(), 1e-9); @@ -517,7 +517,7 @@ TEST(GaussianConditional, Print) { " d = [ 20 40 ]\n" " mean: 1 elements\n" " x0: 20 40\n" - " logNormalizationConstant: 4.0351\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected, conditional, "GaussianConditional")); @@ -532,7 +532,7 @@ TEST(GaussianConditional, Print) { " S[x1] = [ -1 -2 ]\n" " [ -3 -4 ]\n" " d = [ 20 40 ]\n" - " logNormalizationConstant: 4.0351\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected1, conditional1, "GaussianConditional")); @@ -548,7 +548,7 @@ TEST(GaussianConditional, Print) { " S[y1] = [ -5 -6 ]\n" " [ -7 -8 ]\n" " d = [ 20 40 ]\n" - " logNormalizationConstant: 4.0351\n" + " logNormalizationConstant: -4.0351\n" "isotropic dim=2 sigma=3\n"; EXPECT(assert_print_equal(expected2, conditional2, "GaussianConditional")); } diff --git a/gtsam/linear/tests/testGaussianDensity.cpp b/gtsam/linear/tests/testGaussianDensity.cpp index 3226f40ab..e88fd8cc4 100644 --- a/gtsam/linear/tests/testGaussianDensity.cpp +++ b/gtsam/linear/tests/testGaussianDensity.cpp @@ -55,7 +55,7 @@ TEST(GaussianDensity, FromMeanAndStddev) { double expected1 = 0.5 * e.dot(e); EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9); - double expected2 = -(density.logNormalizationConstant() + 0.5 * e.dot(e)); + double expected2 = -(density.errorConstant() + 0.5 * e.dot(e)); EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9); } diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index 59ee05d07..5e756a483 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -810,9 +810,9 @@ TEST(NoiseModel, NonDiagonalGaussian) TEST(NoiseModel, LogNormalizationConstant1D) { // Very simple 1D noise model, which we can compute by hand. double sigma = 0.1; - // For expected values, we compute -log(1/sqrt(|2πΣ|)) by hand. - // = 0.5*(log(2π) + log(Σ)) (since it is 1D) - double expected_value = 0.5 * log(2 * M_PI * sigma * sigma); + // For expected values, we compute log(1/sqrt(|2πΣ|)) by hand. 
+ // = -0.5*(log(2π) + log(Σ)) (since it is 1D) + double expected_value = -0.5 * log(2 * M_PI * sigma * sigma); // Gaussian { @@ -839,7 +839,7 @@ TEST(NoiseModel, LogNormalizationConstant1D) { auto noise_model = Unit::Create(1); double actual_value = noise_model->logNormalizationConstant(); double sigma = 1.0; - expected_value = 0.5 * log(2 * M_PI * sigma * sigma); + expected_value = -0.5 * log(2 * M_PI * sigma * sigma); EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); } } @@ -850,7 +850,7 @@ TEST(NoiseModel, LogNormalizationConstant3D) { size_t n = 3; // We compute the expected values just like in the LogNormalizationConstant1D // test, but we multiply by 3 due to the determinant. - double expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma); + double expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma); // Gaussian { @@ -879,7 +879,7 @@ TEST(NoiseModel, LogNormalizationConstant3D) { auto noise_model = Unit::Create(3); double actual_value = noise_model->logNormalizationConstant(); double sigma = 1.0; - expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma); + expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma); EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9); } } diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py index bb12ff02b..a72e34062 100644 --- a/python/gtsam/tests/test_HybridBayesNet.py +++ b/python/gtsam/tests/test_HybridBayesNet.py @@ -90,8 +90,7 @@ class TestHybridBayesNet(GtsamTestCase): self.assertTrue(probability >= 0.0) logProb = conditional.logProbability(values) self.assertAlmostEqual(probability, np.exp(logProb)) - expected = -(conditional.logNormalizationConstant() + \ - conditional.error(values)) + expected = -(conditional.errorConstant() + conditional.error(values)) self.assertAlmostEqual(logProb, expected) From 0e676525688e6ff968d46c486dba5aa10103a66a Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 22 Sep 2024 23:04:39 -0400 Subject: [PATCH 06/15] wrap errorConstant for HybridConditional --- gtsam/hybrid/hybrid.i | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i index ef179eb9d..81cca3c46 100644 --- a/gtsam/hybrid/hybrid.i +++ b/gtsam/hybrid/hybrid.i @@ -61,6 +61,7 @@ virtual class HybridConditional { size_t nrParents() const; // Standard interface: + double errorConstant() const; double logNormalizationConstant() const; double logProbability(const gtsam::HybridValues& values) const; double evaluate(const gtsam::HybridValues& values) const; From 6fd8da829baba9e94d798e965cfaa1ee27544df5 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 22 Sep 2024 23:07:47 -0400 Subject: [PATCH 07/15] wrap errorConstant for DiscreteConditional --- gtsam/discrete/discrete.i | 1 + 1 file changed, 1 insertion(+) diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i index 0bdebd0e1..753c3527a 100644 --- a/gtsam/discrete/discrete.i +++ b/gtsam/discrete/discrete.i @@ -112,6 +112,7 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor { const std::vector& table); // Standard interface + double errorConstant() const; double logNormalizationConstant() const; double logProbability(const gtsam::DiscreteValues& values) const; double evaluate(const gtsam::DiscreteValues& values) const; From aae5f9e0405d3364bbd5cc875b80e325f71a79fc Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Sun, 22 Sep 2024 23:15:00 -0400 Subject: [PATCH 08/15] fix numpy deprecation --- python/gtsam/tests/test_NonlinearOptimizer.py | 17 +++++++++-------- 1 file 
changed, 9 insertions(+), 8 deletions(-) diff --git a/python/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py index a47c5ad62..1b4209509 100644 --- a/python/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -14,15 +14,16 @@ from __future__ import print_function import unittest -import gtsam -from gtsam import ( - DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, GaussNewtonOptimizer, - GaussNewtonParams, GncLMParams, GncLossType, GncLMOptimizer, LevenbergMarquardtOptimizer, - LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2, - PriorFactorPoint2, Values -) from gtsam.utils.test_case import GtsamTestCase +import gtsam +from gtsam import (DoglegOptimizer, DoglegParams, + DummyPreconditionerParameters, GaussNewtonOptimizer, + GaussNewtonParams, GncLMOptimizer, GncLMParams, GncLossType, + LevenbergMarquardtOptimizer, LevenbergMarquardtParams, + NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2, + PriorFactorPoint2, Values) + KEY1 = 1 KEY2 = 2 @@ -136,7 +137,7 @@ class TestScenario(GtsamTestCase): # Test optimizer params optimizer = GncLMOptimizer(self.fg, self.initial_values, params) for ict_factor in (0.9, 1.1): - new_ict = ict_factor * optimizer.getInlierCostThresholds() + new_ict = ict_factor * optimizer.getInlierCostThresholds().item() optimizer.setInlierCostThresholds(new_ict) self.assertAlmostEqual(optimizer.getInlierCostThresholds(), new_ict) for w_factor in (0.8, 0.9): From e09344c6ba5ab8c779eec624422fca56657c6d35 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 23 Sep 2024 03:37:09 -0400 Subject: [PATCH 09/15] replace errorConstant with negLogConstant --- gtsam/discrete/DiscreteConditional.cpp | 2 +- gtsam/discrete/DiscreteConditional.h | 8 ++++---- gtsam/discrete/discrete.i | 2 +- gtsam/hybrid/HybridConditional.cpp | 10 +++++----- gtsam/hybrid/HybridConditional.h | 4 ++-- gtsam/hybrid/HybridGaussianConditional.cpp | 8 ++++---- gtsam/hybrid/HybridGaussianConditional.h | 2 +- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 6 +++--- gtsam/hybrid/hybrid.i | 2 +- .../hybrid/tests/testHybridGaussianConditional.cpp | 14 +++++++------- gtsam/hybrid/tests/testHybridGaussianFactor.cpp | 4 ++-- .../tests/testHybridNonlinearFactorGraph.cpp | 4 ++-- gtsam/inference/Conditional-inst.h | 8 ++++---- gtsam/inference/Conditional.h | 2 +- gtsam/linear/GaussianConditional.cpp | 2 +- gtsam/linear/GaussianConditional.h | 2 +- gtsam/linear/NoiseModel.cpp | 4 ++-- gtsam/linear/NoiseModel.h | 2 +- gtsam/linear/linear.i | 2 +- gtsam/linear/tests/testGaussianDensity.cpp | 2 +- python/gtsam/tests/test_HybridBayesNet.py | 2 +- 21 files changed, 46 insertions(+), 46 deletions(-) diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index 3e74cebb4..3f0c9f511 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -467,7 +467,7 @@ double DiscreteConditional::evaluate(const HybridValues& x) const { } /* ************************************************************************* */ -double DiscreteConditional::errorConstant() const { +double DiscreteConditional::negLogConstant() const { return 0.0; } diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h index 53331a0be..e16100d0a 100644 --- a/gtsam/discrete/DiscreteConditional.h +++ b/gtsam/discrete/DiscreteConditional.h @@ -264,12 +264,12 @@ class GTSAM_EXPORT DiscreteConditional } /** - * errorConstant is 
just zero, such that
-   * logProbability(x) = log(evaluate(x)) = - error(x)
-   * and hence error(x) = - log(evaluate(x)) > 0 for all x.
+   * negLogConstant is just zero, such that
+   * -logProbability(x) = -log(evaluate(x)) = error(x)
+   * and hence error(x) >= 0 for all x.
   * Thus -log(K) for the normalization constant k is 0.
   */
-  double errorConstant() const override;
+  double negLogConstant() const override;

  /// @}

diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i
index 753c3527a..ab3ff75ca 100644
--- a/gtsam/discrete/discrete.i
+++ b/gtsam/discrete/discrete.i
@@ -112,7 +112,7 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor {
                       const std::vector& table);

  // Standard interface
-  double errorConstant() const;
+  double negLogConstant() const;
  double logNormalizationConstant() const;
  double logProbability(const gtsam::DiscreteValues& values) const;
  double evaluate(const gtsam::DiscreteValues& values) const;
diff --git a/gtsam/hybrid/HybridConditional.cpp b/gtsam/hybrid/HybridConditional.cpp
index 57d8bba39..cac2adcf8 100644
--- a/gtsam/hybrid/HybridConditional.cpp
+++ b/gtsam/hybrid/HybridConditional.cpp
@@ -161,18 +161,18 @@ double HybridConditional::logProbability(const HybridValues &values) const {
 }

 /* ************************************************************************ */
-double HybridConditional::errorConstant() const {
+double HybridConditional::negLogConstant() const {
   if (auto gc = asGaussian()) {
-    return gc->errorConstant();
+    return gc->negLogConstant();
   }
   if (auto gm = asHybrid()) {
-    return gm->errorConstant();  // 0.0!
+    return gm->negLogConstant();  // 0.0!
   }
   if (auto dc = asDiscrete()) {
-    return dc->errorConstant();  // 0.0!
+    return dc->negLogConstant();  // 0.0!
   }
   throw std::runtime_error(
-      "HybridConditional::errorConstant: conditional type not handled");
+      "HybridConditional::negLogConstant: conditional type not handled");
 }

 /* ************************************************************************ */
diff --git a/gtsam/hybrid/HybridConditional.h b/gtsam/hybrid/HybridConditional.h
index 92118385c..588c44e0b 100644
--- a/gtsam/hybrid/HybridConditional.h
+++ b/gtsam/hybrid/HybridConditional.h
@@ -194,11 +194,11 @@ class GTSAM_EXPORT HybridConditional
  /**
   * Return the negative log of the normalization constant.
-   * This shows up in the error as -(error(x) + errorConstant)
+   * This shows up in the logProbability as -(negLogConstant + error(x)).
   * Note this is 0.0 for discrete and hybrid conditionals, but depends
   * on the continuous parameters for Gaussian conditionals.
   */
-  double errorConstant() const override;
+  double negLogConstant() const override;

  /// Return the probability (or density) of the underlying conditional.
double evaluate(const HybridValues& values) const override; diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index f6cd3e5f7..f1220505d 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -36,7 +36,7 @@ HybridGaussianFactor::FactorValuePairs GetFactorValuePairs( // Check if conditional is pruned if (conditional) { // Assign log(\sqrt(|2πΣ|)) = -log(1 / sqrt(|2πΣ|)) - value = conditional->errorConstant(); + value = conditional->negLogConstant(); } return {std::dynamic_pointer_cast(conditional), value}; }; @@ -58,7 +58,7 @@ HybridGaussianConditional::HybridGaussianConditional( [this](const GaussianConditional::shared_ptr &conditional) { if (conditional) { this->logConstant_ = - std::min(this->logConstant_, conditional->errorConstant()); + std::min(this->logConstant_, conditional->negLogConstant()); } }); } @@ -84,7 +84,7 @@ GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree() auto wrap = [this](const GaussianConditional::shared_ptr &gc) { // First check if conditional has not been pruned if (gc) { - const double Cgm_Kgcm = gc->errorConstant() - this->logConstant_; + const double Cgm_Kgcm = gc->negLogConstant() - this->logConstant_; // If there is a difference in the covariances, we need to account for // that since the error is dependent on the mode. if (Cgm_Kgcm > 0.0) { @@ -214,7 +214,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( [&](const GaussianConditional::shared_ptr &conditional) -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); - const double Cgm_Kgcm = conditional->errorConstant() - logConstant_; + const double Cgm_Kgcm = conditional->negLogConstant() - logConstant_; if (Cgm_Kgcm == 0.0) { return {likelihood_m, 0.0}; } else { diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index 8851c7741..8566d47ef 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -158,7 +158,7 @@ class GTSAM_EXPORT HybridGaussianConditional * * @return double */ - inline double errorConstant() const override { return logConstant_; } + inline double negLogConstant() const override { return logConstant_; } /** * Create a likelihood factor for a hybrid Gaussian conditional, diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index a6c15d1fd..603b29fb5 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -329,9 +329,9 @@ static std::shared_ptr createDiscreteFactor( // Logspace version of: // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); - // errorConstant gives `-log(k)` + // negLogConstant gives `-log(k)` // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. 
- return -factor->error(kEmpty) + conditional->errorConstant(); + return -factor->error(kEmpty) + conditional->negLogConstant(); }; AlgebraicDecisionTree logProbabilities( @@ -357,7 +357,7 @@ static std::shared_ptr createHybridGaussianFactor( // Add 2.0 term since the constant term will be premultiplied by 0.5 // as per the Hessian definition, // and negative since we want log(k) - hf->constantTerm() += -2.0 * conditional->errorConstant(); + hf->constantTerm() += -2.0 * conditional->negLogConstant(); } return {factor, 0.0}; }; diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i index 81cca3c46..cdeb35046 100644 --- a/gtsam/hybrid/hybrid.i +++ b/gtsam/hybrid/hybrid.i @@ -61,7 +61,7 @@ virtual class HybridConditional { size_t nrParents() const; // Standard interface: - double errorConstant() const; + double negLogConstant() const; double logNormalizationConstant() const; double logProbability(const gtsam::HybridValues& values) const; double evaluate(const gtsam::HybridValues& values) const; diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp index 6c1037e1d..74f57a740 100644 --- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp @@ -180,16 +180,16 @@ TEST(HybridGaussianConditional, Error2) { // Check result. DiscreteKeys discrete_keys{mode}; - double errorConstant0 = conditionals[0]->errorConstant(); - double errorConstant1 = conditionals[1]->errorConstant(); - double minErrorConstant = std::min(errorConstant0, errorConstant1); + double negLogConstant0 = conditionals[0]->negLogConstant(); + double negLogConstant1 = conditionals[1]->negLogConstant(); + double minErrorConstant = std::min(negLogConstant0, negLogConstant1); // Expected error is e(X) + log(sqrt(|2πΣ|)). - // We normalize log(sqrt(|2πΣ|)) with min(errorConstant) + // We normalize log(sqrt(|2πΣ|)) with min(negLogConstant) // so it is non-negative. 
std::vector leaves = { - conditionals[0]->error(vv) + errorConstant0 - minErrorConstant, - conditionals[1]->error(vv) + errorConstant1 - minErrorConstant}; + conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant, + conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant}; AlgebraicDecisionTree expected(discrete_keys, leaves); EXPECT(assert_equal(expected, actual, 1e-6)); @@ -198,7 +198,7 @@ TEST(HybridGaussianConditional, Error2) { for (size_t mode : {0, 1}) { const HybridValues hv{vv, {{M(0), mode}}}; EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) + - conditionals[mode]->errorConstant() - + conditionals[mode]->negLogConstant() - minErrorConstant, hybrid_conditional.error(hv), 1e-8); } diff --git a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp index d84948c75..4b664b8b4 100644 --- a/gtsam/hybrid/tests/testHybridGaussianFactor.cpp +++ b/gtsam/hybrid/tests/testHybridGaussianFactor.cpp @@ -780,8 +780,8 @@ static HybridGaussianFactorGraph CreateFactorGraph( // Create HybridGaussianFactor // We take negative since we want // the underlying scalar to be log(\sqrt(|2πΣ|)) - std::vector factors{{f0, model0->errorConstant()}, - {f1, model1->errorConstant()}}; + std::vector factors{{f0, model0->negLogConstant()}, + {f1, model1->negLogConstant()}}; HybridGaussianFactor motionFactor({X(0), X(1)}, m1, factors); HybridGaussianFactorGraph hfg; diff --git a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp index 5f1108ace..495444c79 100644 --- a/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp +++ b/gtsam/hybrid/tests/testHybridNonlinearFactorGraph.cpp @@ -902,8 +902,8 @@ static HybridNonlinearFactorGraph CreateFactorGraph( // Create HybridNonlinearFactor // We take negative since we want // the underlying scalar to be log(\sqrt(|2πΣ|)) - std::vector factors{{f0, model0->errorConstant()}, - {f1, model1->errorConstant()}}; + std::vector factors{{f0, model0->negLogConstant()}, + {f1, model1->negLogConstant()}}; HybridNonlinearFactor mixtureFactor({X(0), X(1)}, m1, factors); diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h index ba42a9a24..32fae188b 100644 --- a/gtsam/inference/Conditional-inst.h +++ b/gtsam/inference/Conditional-inst.h @@ -59,17 +59,17 @@ double Conditional::evaluate( /* ************************************************************************* */ template -double Conditional::errorConstant() +double Conditional::negLogConstant() const { throw std::runtime_error( - "Conditional::errorConstant is not implemented"); + "Conditional::negLogConstant is not implemented"); } /* ************************************************************************* */ template double Conditional::logNormalizationConstant() const { - return -errorConstant(); + return -negLogConstant(); } /* ************************************************************************* */ @@ -96,7 +96,7 @@ bool Conditional::CheckInvariants( // normalization constant const double error = conditional.error(values); if (error < 0.0) return false; // prob_or_density is negative. 
- const double expected = -(conditional.errorConstant() + error); + const double expected = -(conditional.negLogConstant() + error); if (std::abs(logProb - expected) > 1e-9) return false; // logProb is not consistent with error return true; diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index d09341489..c44ed239e 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -169,7 +169,7 @@ namespace gtsam { * * @return double */ - virtual double errorConstant() const; + virtual double negLogConstant() const; /** * All conditional types need to implement a log normalization constant to diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp index ca4a1ed33..7508524c3 100644 --- a/gtsam/linear/GaussianConditional.cpp +++ b/gtsam/linear/GaussianConditional.cpp @@ -182,7 +182,7 @@ namespace gtsam { /* ************************************************************************* */ // normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma)) // neg-log = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma) - double GaussianConditional::errorConstant() const { + double GaussianConditional::negLogConstant() const { constexpr double log2pi = 1.8378770664093454835606594728112; size_t n = d().size(); // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1} diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 31f3797d5..27270fece 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -140,7 +140,7 @@ namespace gtsam { * * @return double */ - double errorConstant() const override; + double negLogConstant() const override; /** * Calculate log-probability log(evaluate(x)) for given values `x`: diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index 39901584b..68c2ffda7 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -255,7 +255,7 @@ double Gaussian::logDeterminant() const { } /* *******************************************************************************/ -double Gaussian::errorConstant() const { +double Gaussian::negLogConstant() const { // log(det(Sigma)) = -2.0 * logDetR // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDetR()) // = 0.5*n*log(2*pi) - (0.5*2.0 * logDetR()) @@ -268,7 +268,7 @@ double Gaussian::errorConstant() const { /* *******************************************************************************/ double Gaussian::logNormalizationConstant() const { - return -errorConstant(); + return -negLogConstant(); } /* ************************************************************************* */ diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 851dc3c1f..5ad48cade 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -277,7 +277,7 @@ namespace gtsam { * * @return double */ - double errorConstant() const; + double negLogConstant() const; /** * @brief Method to compute the normalization constant diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index 71eb94dd5..23b6fb3f6 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -548,7 +548,7 @@ virtual class GaussianConditional : gtsam::JacobianFactor { bool equals(const gtsam::GaussianConditional& cg, double tol) const; // Standard Interface - double errorConstant() const; + double negLogConstant() const; double logNormalizationConstant() const; double logProbability(const gtsam::VectorValues& x) const; double evaluate(const gtsam::VectorValues& x) const; diff --git 
a/gtsam/linear/tests/testGaussianDensity.cpp b/gtsam/linear/tests/testGaussianDensity.cpp index e88fd8cc4..b2861e833 100644 --- a/gtsam/linear/tests/testGaussianDensity.cpp +++ b/gtsam/linear/tests/testGaussianDensity.cpp @@ -55,7 +55,7 @@ TEST(GaussianDensity, FromMeanAndStddev) { double expected1 = 0.5 * e.dot(e); EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9); - double expected2 = -(density.errorConstant() + 0.5 * e.dot(e)); + double expected2 = -(density.negLogConstant() + 0.5 * e.dot(e)); EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9); } diff --git a/python/gtsam/tests/test_HybridBayesNet.py b/python/gtsam/tests/test_HybridBayesNet.py index a72e34062..bf2b6a033 100644 --- a/python/gtsam/tests/test_HybridBayesNet.py +++ b/python/gtsam/tests/test_HybridBayesNet.py @@ -90,7 +90,7 @@ class TestHybridBayesNet(GtsamTestCase): self.assertTrue(probability >= 0.0) logProb = conditional.logProbability(values) self.assertAlmostEqual(probability, np.exp(logProb)) - expected = -(conditional.errorConstant() + conditional.error(values)) + expected = -(conditional.negLogConstant() + conditional.error(values)) self.assertAlmostEqual(logProb, expected) From e95b8be014e245f4300eddb86c1dd400cf37f9e6 Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 23 Sep 2024 14:54:53 -0400 Subject: [PATCH 10/15] kill logNormalizationConstant in favor of negLogConstant --- gtsam/discrete/discrete.i | 1 - gtsam/hybrid/HybridGaussianConditional.cpp | 2 +- gtsam/hybrid/hybrid.i | 1 - .../tests/testHybridGaussianConditional.cpp | 14 ++++---- gtsam/inference/Conditional-inst.h | 19 +++-------- gtsam/inference/Conditional.h | 17 ++++------ gtsam/linear/GaussianBayesNet.cpp | 21 ++++++------ gtsam/linear/GaussianBayesNet.h | 6 ++-- gtsam/linear/GaussianConditional.cpp | 4 +-- gtsam/linear/NoiseModel.cpp | 5 --- gtsam/linear/NoiseModel.h | 10 +----- gtsam/linear/linear.i | 1 - gtsam/linear/tests/testGaussianBayesNet.cpp | 9 +++-- .../linear/tests/testGaussianConditional.cpp | 10 +++--- gtsam/linear/tests/testNoiseModel.cpp | 34 +++++++++---------- 15 files changed, 61 insertions(+), 93 deletions(-) diff --git a/gtsam/discrete/discrete.i b/gtsam/discrete/discrete.i index ab3ff75ca..55c8f9e22 100644 --- a/gtsam/discrete/discrete.i +++ b/gtsam/discrete/discrete.i @@ -113,7 +113,6 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor { // Standard interface double negLogConstant() const; - double logNormalizationConstant() const; double logProbability(const gtsam::DiscreteValues& values) const; double evaluate(const gtsam::DiscreteValues& values) const; double error(const gtsam::DiscreteValues& values) const; diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index f1220505d..a7ed9e939 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -155,7 +155,7 @@ void HybridGaussianConditional::print(const std::string &s, std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), "; } std::cout << std::endl - << " logNormalizationConstant: " << logNormalizationConstant() + << " logNormalizationConstant: " << -negLogConstant() << std::endl << std::endl; conditionals_.print( diff --git a/gtsam/hybrid/hybrid.i b/gtsam/hybrid/hybrid.i index cdeb35046..82881ac47 100644 --- a/gtsam/hybrid/hybrid.i +++ b/gtsam/hybrid/hybrid.i @@ -62,7 +62,6 @@ virtual class HybridConditional { // Standard interface: double negLogConstant() const; - double logNormalizationConstant() const; double 
logProbability(const gtsam::HybridValues& values) const;
  double evaluate(const gtsam::HybridValues& values) const;
  double operator()(const gtsam::HybridValues& values) const;
diff --git a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
index 74f57a740..adcf6c70f 100644
--- a/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
+++ b/gtsam/hybrid/tests/testHybridGaussianConditional.cpp
@@ -61,11 +61,11 @@ const HybridGaussianConditional hybrid_conditional({Z(0)}, {X(0)}, mode,
 TEST(HybridGaussianConditional, Invariants) {
   using namespace equal_constants;

-  // Check that the conditional normalization constant is the max of all
-  // constants which are all equal, in this case, hence:
-  const double K = hybrid_conditional.logNormalizationConstant();
-  EXPECT_DOUBLES_EQUAL(K, conditionals[0]->logNormalizationConstant(), 1e-8);
-  EXPECT_DOUBLES_EQUAL(K, conditionals[1]->logNormalizationConstant(), 1e-8);
+  // Check that the conditional negative log normalization constant is the min
+  // of the individual constants, which are all equal in this case, hence:
+  const double K = hybrid_conditional.negLogConstant();
+  EXPECT_DOUBLES_EQUAL(K, conditionals[0]->negLogConstant(), 1e-8);
+  EXPECT_DOUBLES_EQUAL(K, conditionals[1]->negLogConstant(), 1e-8);

   EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv0));
   EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv1));
@@ -231,8 +231,8 @@ TEST(HybridGaussianConditional, Likelihood2) {
   CHECK(jf1->rows() == 2);

   // Check that the constant C1 is properly encoded in the JacobianFactor.
-  const double C1 = hybrid_conditional.logNormalizationConstant() -
-                    conditionals[1]->logNormalizationConstant();
+  const double C1 =
+      conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant();
   const double c1 = std::sqrt(2.0 * C1);
   Vector expected_unwhitened(2);
   expected_unwhitened << 4.9 - 5.0, -c1;
diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h
index 32fae188b..ca4d0981f 100644
--- a/gtsam/inference/Conditional-inst.h
+++ b/gtsam/inference/Conditional-inst.h
@@ -59,23 +59,14 @@ double Conditional::evaluate(

 /* ************************************************************************* */
 template
-double Conditional::negLogConstant()
-    const {
-  throw std::runtime_error(
-      "Conditional::negLogConstant is not implemented");
-}
-
-/* ************************************************************************* */
-template
-double Conditional::logNormalizationConstant()
-    const {
-  return -negLogConstant();
+double Conditional::negLogConstant() const {
+  throw std::runtime_error("Conditional::negLogConstant is not implemented");
 }

 /* ************************************************************************* */
 template
 double Conditional::normalizationConstant() const {
-  return std::exp(logNormalizationConstant());
+  return std::exp(-negLogConstant());
 }

 /* ************************************************************************* */
@@ -90,8 +81,8 @@ bool Conditional::CheckInvariants(
   const double logProb = conditional.logProbability(values);
   if (std::abs(prob_or_density - std::exp(logProb)) > 1e-9)
     return false;  // logProb is not consistent with prob_or_density
-  if (std::abs(conditional.logNormalizationConstant() -
-               std::log(conditional.normalizationConstant())) > 1e-9)
+  if (std::abs(conditional.negLogConstant() -
+               (-std::log(conditional.normalizationConstant()))) > 1e-9)
     return false;  // log normalization constant is not consistent with
                    // 
normalization constant const double error = conditional.error(values); diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index c44ed239e..f37a1b7a4 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -39,7 +39,8 @@ namespace gtsam { * logProbability(x) = -(K + error(x)) * i.e., K = -log(k). The normalization constant k is assumed to *not* depend * on any argument, only (possibly) on the conditional parameters. - * This class provides a default logNormalizationConstant() == 0.0. + * This class provides a default negative log normalization constant + * negLogConstant() == 0.0. * * There are four broad classes of conditionals that derive from Conditional: * @@ -165,19 +166,13 @@ namespace gtsam { /** * @brief All conditional types need to implement this as the negative log - * of the normalization constant. + * of the normalization constant to make it such that error>=0. * * @return double */ virtual double negLogConstant() const; - /** - * All conditional types need to implement a log normalization constant to - * make it such that error>=0. - */ - virtual double logNormalizationConstant() const; - - /** Non-virtual, exponentiate logNormalizationConstant. */ + /** Non-virtual, negate and exponentiate negLogConstant. */ double normalizationConstant() const; /// @} @@ -217,9 +212,9 @@ namespace gtsam { * - evaluate >= 0.0 * - evaluate(x) == conditional(x) * - exp(logProbability(x)) == evaluate(x) - * - logNormalizationConstant() = log(normalizationConstant()) + * - negLogConstant() = -log(normalizationConstant()) * - error >= 0.0 - * - logProbability(x) == logNormalizationConstant() - error(x) + * - logProbability(x) == -(negLogConstant() + error(x)) * * @param conditional The conditional to test, as a reference to the derived type. * @tparam VALUES HybridValues, or a more narrow type like DiscreteValues. 
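The invariant listed above, logProbability(x) == -(negLogConstant() + error(x)), is easy to sanity-check numerically outside the test suite. A minimal sketch against the renamed API from this series; the key 0, the mean, and the sigma are arbitrary illustration values, not part of the patch:

#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/VectorValues.h>

#include <cassert>
#include <cmath>

int main() {
  using namespace gtsam;
  // p(x0) = N(5.0; sigma = 0.1), so negLogConstant() = 0.5 * log(2*pi*0.01).
  const auto p = GaussianConditional::FromMeanAndStddev(0, Vector1(5.0), 0.1);
  VectorValues x;
  x.insert(0, Vector1(5.2));  // error(x) = 0.5 * (0.2 / 0.1)^2 = 2.0
  const double expected = -(p.negLogConstant() + p.error(x));
  assert(std::abs(p.logProbability(x) - expected) < 1e-9);
  return 0;
}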
diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp
index 978475bcb..f8c4ddc4c 100644
--- a/gtsam/linear/GaussianBayesNet.cpp
+++ b/gtsam/linear/GaussianBayesNet.cpp
@@ -243,25 +243,24 @@ namespace gtsam {
   }

   /* ************************************************************************* */
-  double GaussianBayesNet::logNormalizationConstant() const {
+  double GaussianBayesNet::negLogConstant() const {
     /*
       normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-      logConstant = -log(normalizationConstant)
-                  = -0.5 * n*log(2*pi) - 0.5 * log(det(Sigma))
+      negLogConstant = -log(normalizationConstant)
+                     = 0.5 * n*log(2*pi) + 0.5 * log(det(Sigma))

       log(det(Sigma)) = -2.0 * logDeterminant()
-      thus, logConstant = -0.5*n*log(2*pi) + logDeterminant()
+      thus, negLogConstant = 0.5*n*log(2*pi) - logDeterminant()

-      BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i())
-                           = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i())
-                           = sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant()
-                           = sum(logNormalizationConstant_i)
+      BayesNet negLogConstant = sum(0.5*n_i*log(2*pi) - logDeterminant_i())
+                              = sum(0.5*n_i*log(2*pi)) - sum(logDeterminant_i())
+                              = sum(0.5*n_i*log(2*pi)) - bn->logDeterminant()
     */
-    double logNormConst = 0.0;
+    double negLogNormConst = 0.0;
     for (const sharedConditional& cg : *this) {
-      logNormConst += cg->logNormalizationConstant();
+      negLogNormConst += cg->negLogConstant();
     }
-    return logNormConst;
+    return negLogNormConst;
   }

   /* ************************************************************************* */
diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h
index ea1cb8603..e858bbe43 100644
--- a/gtsam/linear/GaussianBayesNet.h
+++ b/gtsam/linear/GaussianBayesNet.h
@@ -235,12 +235,12 @@ namespace gtsam {
     double logDeterminant() const;

     /**
-     * @brief Get the log of the normalization constant corresponding to the
-     * joint Gaussian density represented by this Bayes net.
+     * @brief Get the negative log of the normalization constant corresponding
+     * to the joint Gaussian density represented by this Bayes net.
      *
      * @return double
      */
-    double logNormalizationConstant() const;
+    double negLogConstant() const;

     /**
      * Backsubstitute with a different RHS vector than the one stored in this BayesNet.
diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp
index 7508524c3..7735e5129 100644
--- a/gtsam/linear/GaussianConditional.cpp
+++ b/gtsam/linear/GaussianConditional.cpp
@@ -121,7 +121,7 @@ namespace gtsam {
       const auto mean = solve({});  // solve for mean.
mean.print(" mean", formatter); } - cout << " logNormalizationConstant: " << logNormalizationConstant() << endl; + cout << " logNormalizationConstant: " << -negLogConstant() << endl; if (model_) model_->print(" Noise model: "); else @@ -198,7 +198,7 @@ namespace gtsam { // density = k exp(-error(x)) // log = log(k) - error(x) double GaussianConditional::logProbability(const VectorValues& x) const { - return logNormalizationConstant() - error(x); + return -(negLogConstant() + error(x)); } double GaussianConditional::logProbability(const HybridValues& x) const { diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index 68c2ffda7..1236f1b94 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -266,11 +266,6 @@ double Gaussian::negLogConstant() const { return 0.5 * n * log2pi - logDetR(); } -/* *******************************************************************************/ -double Gaussian::logNormalizationConstant() const { - return -negLogConstant(); -} - /* ************************************************************************* */ // Diagonal /* ************************************************************************* */ diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h index 5ad48cade..34fa63e4c 100644 --- a/gtsam/linear/NoiseModel.h +++ b/gtsam/linear/NoiseModel.h @@ -273,20 +273,12 @@ namespace gtsam { /** * @brief Compute the negative log of the normalization constant - * for a Gaussian noise model k = \sqrt(1/|2πΣ|). + * for a Gaussian noise model k = 1/\sqrt(|2πΣ|). * * @return double */ double negLogConstant() const; - /** - * @brief Method to compute the normalization constant - * for a Gaussian noise model k = \sqrt(1/|2πΣ|). - * - * @return double - */ - double logNormalizationConstant() const; - private: #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION /** Serialization function */ diff --git a/gtsam/linear/linear.i b/gtsam/linear/linear.i index 23b6fb3f6..af6c2ee22 100644 --- a/gtsam/linear/linear.i +++ b/gtsam/linear/linear.i @@ -549,7 +549,6 @@ virtual class GaussianConditional : gtsam::JacobianFactor { // Standard Interface double negLogConstant() const; - double logNormalizationConstant() const; double logProbability(const gtsam::VectorValues& x) const; double evaluate(const gtsam::VectorValues& x) const; double error(const gtsam::VectorValues& x) const; diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 99453ee4e..b1cfbe12c 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -76,12 +76,11 @@ TEST(GaussianBayesNet, Evaluate1) { // the normalization constant 1.0/sqrt((2*pi*Sigma).det()). 
// The covariance matrix inv(Sigma) = R'*R, so the determinant is const double constant = sqrt((invSigma / (2 * M_PI)).determinant()); - EXPECT_DOUBLES_EQUAL(log(constant), - smallBayesNet.at(0)->logNormalizationConstant() + - smallBayesNet.at(1)->logNormalizationConstant(), - 1e-9); - EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(), + EXPECT_DOUBLES_EQUAL(-log(constant), + smallBayesNet.at(0)->negLogConstant() + + smallBayesNet.at(1)->negLogConstant(), 1e-9); + EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.negLogConstant(), 1e-9); const double actual = smallBayesNet.evaluate(mean); EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9); } diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp index 26086104c..68894c314 100644 --- a/gtsam/linear/tests/testGaussianConditional.cpp +++ b/gtsam/linear/tests/testGaussianConditional.cpp @@ -486,17 +486,17 @@ TEST(GaussianConditional, Error) { /* ************************************************************************* */ // Similar test for multivariate gaussian but with sigma 2.0 -TEST(GaussianConditional, LogNormalizationConstant) { +TEST(GaussianConditional, NegLogConstant) { double sigma = 2.0; auto conditional = GaussianConditional::FromMeanAndStddev(X(0), Vector3::Zero(), sigma); VectorValues x; x.insert(X(0), Vector3::Zero()); Matrix3 Sigma = I_3x3 * sigma * sigma; - double expectedLogNormalizationConstant = - log(1 / sqrt((2 * M_PI * Sigma).determinant())); + double expectedNegLogConstant = + -log(1 / sqrt((2 * M_PI * Sigma).determinant())); - EXPECT_DOUBLES_EQUAL(expectedLogNormalizationConstant, - conditional.logNormalizationConstant(), 1e-9); + EXPECT_DOUBLES_EQUAL(expectedNegLogConstant, conditional.negLogConstant(), + 1e-9); } /* ************************************************************************* */ diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index 5e756a483..2518e4c49 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -807,50 +807,50 @@ TEST(NoiseModel, NonDiagonalGaussian) } } -TEST(NoiseModel, LogNormalizationConstant1D) { +TEST(NoiseModel, NegLogNormalizationConstant1D) { // Very simple 1D noise model, which we can compute by hand. double sigma = 0.1; - // For expected values, we compute log(1/sqrt(|2πΣ|)) by hand. - // = -0.5*(log(2π) + log(Σ)) (since it is 1D) - double expected_value = -0.5 * log(2 * M_PI * sigma * sigma); + // For expected values, we compute -log(1/sqrt(|2πΣ|)) by hand. 
+  //          = 0.5*(log(2π) + log(Σ)) (since it is 1D)
+  double expected_value = 0.5 * log(2 * M_PI * sigma * sigma);

   // Gaussian
   {
     Matrix11 R;
     R << 1 / sigma;
     auto noise_model = Gaussian::SqrtInformation(R);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Diagonal
   {
     auto noise_model = Diagonal::Sigmas(Vector1(sigma));
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Isotropic
   {
     auto noise_model = Isotropic::Sigma(1, sigma);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Unit
   {
     auto noise_model = Unit::Create(1);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     double sigma = 1.0;
-    expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
+    expected_value = 0.5 * log(2 * M_PI * sigma * sigma);
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
 }

-TEST(NoiseModel, LogNormalizationConstant3D) {
+TEST(NoiseModel, NegLogNormalizationConstant3D) {
   // Simple 3D noise model, which we can compute by hand.
   double sigma = 0.1;
   size_t n = 3;
-  // We compute the expected values just like in the LogNormalizationConstant1D
+  // We compute the expected values just like in the NegLogNormalizationConstant1D
   // test, but we multiply by 3 due to the determinant.
-  double expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
+  double expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma);

   // Gaussian
   {
@@ -859,27 +859,27 @@ TEST(NoiseModel, LogNormalizationConstant3D) {
         0, 1 / sigma, 4,   //
         0, 0, 1 / sigma;
     auto noise_model = Gaussian::SqrtInformation(R);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Diagonal
   {
     auto noise_model = Diagonal::Sigmas(Vector3(sigma, sigma, sigma));
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Isotropic
   {
     auto noise_model = Isotropic::Sigma(n, sigma);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Unit
   {
     auto noise_model = Unit::Create(3);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     double sigma = 1.0;
-    expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
+    expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma);
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
 }

From 2c7e5eb65658ceb3c3ecef891817702ea8976c2d Mon Sep 17 00:00:00 2001
From: Varun Agrawal
Date: Mon, 23 Sep 2024 15:07:33 -0400
Subject: [PATCH 11/15] fix docstring

---
 gtsam/hybrid/HybridGaussianConditional.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h
index 8566d47ef..d2b8982a3 100644
--- a/gtsam/hybrid/HybridGaussianConditional.h
+++ b/gtsam/hybrid/HybridGaussianConditional.h
@@ -153,7 +153,7 @@ class GTSAM_EXPORT HybridGaussianConditional

   /**
   * 
@brief Return log normalization constant in negative log space. * - * The log normalization constant is the max of the individual + * The log normalization constant is the min of the individual * log-normalization constants. * * @return double From 5da7588b669bfc073b4df48e50aa984aff01915b Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 23 Sep 2024 15:08:12 -0400 Subject: [PATCH 12/15] factor errors in negative log space --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 603b29fb5..4845a4cd5 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -233,8 +233,8 @@ continuousElimination(const HybridGaussianFactorGraph &factors, /* ************************************************************************ */ /** - * @brief Exponentiate log-values, not necessarily normalized, normalize, and - * return as AlgebraicDecisionTree. + * @brief Exponentiate (not necessarily normalized) log-values, normalize, and + * then return as AlgebraicDecisionTree. * * @param logValues DecisionTree of (unnormalized) log values. * @return AlgebraicDecisionTree @@ -242,9 +242,9 @@ continuousElimination(const HybridGaussianFactorGraph &factors, static AlgebraicDecisionTree probabilitiesFromLogValues( const AlgebraicDecisionTree &logValues) { // Perform normalization - double max_log = logValues.max(); + double min_log = logValues.min(); AlgebraicDecisionTree probabilities = DecisionTree( - logValues, [&max_log](const double x) { return exp(x - max_log); }); + logValues, [&min_log](const double x) { return exp(-(x - min_log)); }); probabilities = probabilities.normalize(probabilities.sum()); return probabilities; @@ -265,7 +265,7 @@ discreteElimination(const HybridGaussianFactorGraph &factors, auto logProbability = [&](const GaussianFactor::shared_ptr &factor) -> double { if (!factor) return 0.0; - return -factor->error(VectorValues()); + return factor->error(VectorValues()); }; AlgebraicDecisionTree logProbabilities = DecisionTree(gmf->factors(), logProbability); @@ -327,11 +327,11 @@ static std::shared_ptr createDiscreteFactor( // If the factor is not null, it has no keys, just contains the residual. if (!factor) return 1.0; // TODO(dellaert): not loving this. - // Logspace version of: + // Negative logspace version of: // exp(-factor->error(kEmpty)) / conditional->normalizationConstant(); // negLogConstant gives `-log(k)` // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`. 
- return -factor->error(kEmpty) + conditional->negLogConstant(); + return factor->error(kEmpty) - conditional->negLogConstant(); }; AlgebraicDecisionTree logProbabilities( From 31519f48b4983890f016f4e085887095d1d1c3ab Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 23 Sep 2024 15:09:37 -0400 Subject: [PATCH 13/15] rename to negLogProbability --- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 4845a4cd5..85a2de464 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -321,7 +321,7 @@ using Result = std::pair, static std::shared_ptr createDiscreteFactor( const DecisionTree &eliminationResults, const DiscreteKeys &discreteSeparator) { - auto logProbability = [&](const Result &pair) -> double { + auto negLogProbability = [&](const Result &pair) -> double { const auto &[conditional, factor] = pair; static const VectorValues kEmpty; // If the factor is not null, it has no keys, just contains the residual. @@ -334,10 +334,10 @@ static std::shared_ptr createDiscreteFactor( return factor->error(kEmpty) - conditional->negLogConstant(); }; - AlgebraicDecisionTree logProbabilities( - DecisionTree(eliminationResults, logProbability)); + AlgebraicDecisionTree negLogProbabilities( + DecisionTree(eliminationResults, negLogProbability)); AlgebraicDecisionTree probabilities = - probabilitiesFromLogValues(logProbabilities); + probabilitiesFromLogValues(negLogProbabilities); return std::make_shared(discreteSeparator, probabilities); } From 92b829dd55654b708509e911fb84a1720b30195b Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 23 Sep 2024 15:55:19 -0400 Subject: [PATCH 14/15] remove normalizationConstant() --- gtsam/inference/Conditional-inst.h | 10 ---------- gtsam/inference/Conditional.h | 3 --- 2 files changed, 13 deletions(-) diff --git a/gtsam/inference/Conditional-inst.h b/gtsam/inference/Conditional-inst.h index ca4d0981f..c21a75d26 100644 --- a/gtsam/inference/Conditional-inst.h +++ b/gtsam/inference/Conditional-inst.h @@ -63,12 +63,6 @@ double Conditional::negLogConstant() const { throw std::runtime_error("Conditional::negLogConstant is not implemented"); } -/* ************************************************************************* */ -template -double Conditional::normalizationConstant() const { - return std::exp(-negLogConstant()); -} - /* ************************************************************************* */ template template @@ -81,10 +75,6 @@ bool Conditional::CheckInvariants( const double logProb = conditional.logProbability(values); if (std::abs(prob_or_density - std::exp(logProb)) > 1e-9) return false; // logProb is not consistent with prob_or_density - if (std::abs(conditional.negLogConstant() - - (-std::log(conditional.normalizationConstant()))) > 1e-9) - return false; // log normalization constant is not consistent with - // normalization constant const double error = conditional.error(values); if (error < 0.0) return false; // prob_or_density is negative. const double expected = -(conditional.negLogConstant() + error); diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index f37a1b7a4..f9da36b7b 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -172,9 +172,6 @@ namespace gtsam { */ virtual double negLogConstant() const; - /** Non-virtual, negate and exponentiate negLogConstant. 
*/ - double normalizationConstant() const; - /// @} /// @name Advanced Interface /// @{ From 9b3176e5ef8ed89f59978fa2aa06fdba682431be Mon Sep 17 00:00:00 2001 From: Varun Agrawal Date: Mon, 23 Sep 2024 15:55:27 -0400 Subject: [PATCH 15/15] better naming --- gtsam/hybrid/HybridGaussianConditional.cpp | 15 +++++++-------- gtsam/hybrid/HybridGaussianConditional.h | 4 ++-- gtsam/hybrid/HybridGaussianFactorGraph.cpp | 10 +++++----- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/gtsam/hybrid/HybridGaussianConditional.cpp b/gtsam/hybrid/HybridGaussianConditional.cpp index a7ed9e939..ef38237f2 100644 --- a/gtsam/hybrid/HybridGaussianConditional.cpp +++ b/gtsam/hybrid/HybridGaussianConditional.cpp @@ -51,14 +51,14 @@ HybridGaussianConditional::HybridGaussianConditional( discreteParents, GetFactorValuePairs(conditionals)), BaseConditional(continuousFrontals.size()), conditionals_(conditionals) { - // Calculate logConstant_ as the minimum of the negative-log normalizers of + // Calculate negLogConstant_ as the minimum of the negative-log normalizers of // the conditionals, by visiting the decision tree: - logConstant_ = std::numeric_limits::infinity(); + negLogConstant_ = std::numeric_limits::infinity(); conditionals_.visit( [this](const GaussianConditional::shared_ptr &conditional) { if (conditional) { - this->logConstant_ = - std::min(this->logConstant_, conditional->negLogConstant()); + this->negLogConstant_ = + std::min(this->negLogConstant_, conditional->negLogConstant()); } }); } @@ -84,7 +84,7 @@ GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree() auto wrap = [this](const GaussianConditional::shared_ptr &gc) { // First check if conditional has not been pruned if (gc) { - const double Cgm_Kgcm = gc->negLogConstant() - this->logConstant_; + const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_; // If there is a difference in the covariances, we need to account for // that since the error is dependent on the mode. if (Cgm_Kgcm > 0.0) { @@ -155,8 +155,7 @@ void HybridGaussianConditional::print(const std::string &s, std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), "; } std::cout << std::endl - << " logNormalizationConstant: " << -negLogConstant() - << std::endl + << " logNormalizationConstant: " << -negLogConstant() << std::endl << std::endl; conditionals_.print( "", [&](Key k) { return formatter(k); }, @@ -214,7 +213,7 @@ std::shared_ptr HybridGaussianConditional::likelihood( [&](const GaussianConditional::shared_ptr &conditional) -> GaussianFactorValuePair { const auto likelihood_m = conditional->likelihood(given); - const double Cgm_Kgcm = conditional->negLogConstant() - logConstant_; + const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_; if (Cgm_Kgcm == 0.0) { return {likelihood_m, 0.0}; } else { diff --git a/gtsam/hybrid/HybridGaussianConditional.h b/gtsam/hybrid/HybridGaussianConditional.h index d2b8982a3..9c70cf6cb 100644 --- a/gtsam/hybrid/HybridGaussianConditional.h +++ b/gtsam/hybrid/HybridGaussianConditional.h @@ -66,7 +66,7 @@ class GTSAM_EXPORT HybridGaussianConditional Conditionals conditionals_; ///< a decision tree of Gaussian conditionals. ///< Negative-log of the normalization constant (log(\sqrt(|2πΣ|))). 
///< Take advantage of the neg-log space so everything is a minimization - double logConstant_; + double negLogConstant_; /** * @brief Convert a HybridGaussianConditional of conditionals into @@ -158,7 +158,7 @@ class GTSAM_EXPORT HybridGaussianConditional * * @return double */ - inline double negLogConstant() const override { return logConstant_; } + inline double negLogConstant() const override { return negLogConstant_; } /** * Create a likelihood factor for a hybrid Gaussian conditional, diff --git a/gtsam/hybrid/HybridGaussianFactorGraph.cpp b/gtsam/hybrid/HybridGaussianFactorGraph.cpp index 85a2de464..82828bb41 100644 --- a/gtsam/hybrid/HybridGaussianFactorGraph.cpp +++ b/gtsam/hybrid/HybridGaussianFactorGraph.cpp @@ -233,13 +233,13 @@ continuousElimination(const HybridGaussianFactorGraph &factors, /* ************************************************************************ */ /** - * @brief Exponentiate (not necessarily normalized) log-values, normalize, and - * then return as AlgebraicDecisionTree. + * @brief Exponentiate (not necessarily normalized) negative log-values, + * normalize, and then return as AlgebraicDecisionTree. * * @param logValues DecisionTree of (unnormalized) log values. * @return AlgebraicDecisionTree */ -static AlgebraicDecisionTree probabilitiesFromLogValues( +static AlgebraicDecisionTree probabilitiesFromNegativeLogValues( const AlgebraicDecisionTree &logValues) { // Perform normalization double min_log = logValues.min(); @@ -271,7 +271,7 @@ discreteElimination(const HybridGaussianFactorGraph &factors, DecisionTree(gmf->factors(), logProbability); AlgebraicDecisionTree probabilities = - probabilitiesFromLogValues(logProbabilities); + probabilitiesFromNegativeLogValues(logProbabilities); dfg.emplace_shared(gmf->discreteKeys(), probabilities); @@ -337,7 +337,7 @@ static std::shared_ptr createDiscreteFactor( AlgebraicDecisionTree negLogProbabilities( DecisionTree(eliminationResults, negLogProbability)); AlgebraicDecisionTree probabilities = - probabilitiesFromLogValues(negLogProbabilities); + probabilitiesFromNegativeLogValues(negLogProbabilities); return std::make_shared(discreteSeparator, probabilities); }
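The min-shifted exponentiation used by probabilitiesFromNegativeLogValues above can be illustrated standalone. A minimal sketch, independent of GTSAM's DecisionTree types; the negative log values are made up for illustration:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Unnormalized negative log probabilities, e.g. error(x) + negLogConstant.
  std::vector<double> negLog = {105.3, 102.1, 110.7};
  // Subtract the minimum before exponentiating: the common offset cancels in
  // the final normalization anyway, and the best hypothesis maps to exp(0) = 1,
  // keeping all exponentials in a well-conditioned range.
  const double minNegLog = *std::min_element(negLog.begin(), negLog.end());
  std::vector<double> p;
  p.reserve(negLog.size());
  for (double nl : negLog) p.push_back(std::exp(-(nl - minNegLog)));
  // Normalize to sum to one, mirroring probabilities.normalize(probabilities.sum()).
  const double sum = std::accumulate(p.begin(), p.end(), 0.0);
  for (double& pi : p) pi /= sum;
  for (double pi : p) std::cout << pi << "\n";  // ~0.0392, ~0.9606, ~0.0002
  return 0;
}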