Merge pull request #1805 from borglab/direct-hybrid-fg

release/4.3a0
Varun Agrawal 2024-09-20 11:05:16 -04:00 committed by GitHub
commit 276747c2c8
11 changed files with 409 additions and 83 deletions


@@ -18,8 +18,6 @@
#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteConditional.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteLookupDAG.h>
#include <gtsam/inference/FactorGraph-inst.h>
namespace gtsam {


@@ -222,8 +222,7 @@ std::shared_ptr<HybridGaussianFactor> HybridGaussianConditional::likelihood(
} else {
// Add a constant to the likelihood in case the noise models
// are not all equal.
- double c = 2.0 * Cgm_Kgcm;
- return {likelihood_m, c};
+ return {likelihood_m, Cgm_Kgcm};
}
});
return std::make_shared<HybridGaussianFactor>(
@@ -232,8 +231,7 @@ std::shared_ptr<HybridGaussianFactor> HybridGaussianConditional::likelihood(
/* ************************************************************************* */
std::set<DiscreteKey> DiscreteKeysAsSet(const DiscreteKeys &discreteKeys) {
- std::set<DiscreteKey> s;
- s.insert(discreteKeys.begin(), discreteKeys.end());
+ std::set<DiscreteKey> s(discreteKeys.begin(), discreteKeys.end());
return s;
}


@@ -30,8 +30,8 @@ namespace gtsam {
/**
* @brief Helper function to augment the [A|b] matrices in the factor components
- * with the normalizer values.
- * This is done by storing the normalizer value in
+ * with the additional scalar values.
+ * This is done by storing the value in
* the `b` vector as an additional row.
*
* @param factors DecisionTree of GaussianFactors and arbitrary scalars.
@@ -46,31 +46,34 @@ HybridGaussianFactor::Factors augment(
AlgebraicDecisionTree<Key> valueTree;
std::tie(gaussianFactors, valueTree) = unzip(factors);
- // Normalize
+ // Compute minimum value for normalization.
double min_value = valueTree.min();
- AlgebraicDecisionTree<Key> values =
-     valueTree.apply([&min_value](double n) { return n - min_value; });
// Finally, update the [A|b] matrices.
- auto update = [&values](const Assignment<Key> &assignment,
-                         const HybridGaussianFactor::sharedFactor &gf) {
+ auto update = [&min_value](const GaussianFactorValuePair &gfv) {
+   auto [gf, value] = gfv;
auto jf = std::dynamic_pointer_cast<JacobianFactor>(gf);
if (!jf) return gf;
- // If the log_normalizer is 0, do nothing
- if (values(assignment) == 0.0) return gf;
+ double normalized_value = value - min_value;
+ // If the value is 0, do nothing
+ if (normalized_value == 0.0) return gf;
GaussianFactorGraph gfg;
gfg.push_back(jf);
Vector c(1);
- c << std::sqrt(values(assignment));
+ // When hiding c inside the `b` vector, value == 0.5*c^2
+ c << std::sqrt(2.0 * normalized_value);
auto constantFactor = std::make_shared<JacobianFactor>(c);
gfg.push_back(constantFactor);
return std::dynamic_pointer_cast<GaussianFactor>(
std::make_shared<JacobianFactor>(gfg));
};
- return gaussianFactors.apply(update);
+ return HybridGaussianFactor::Factors(factors, update);
}
/* *******************************************************************************/
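The `c << std::sqrt(2.0 * normalized_value)` line above is the heart of this hunk: a JacobianFactor with no variables and a single-row `b` vector contributes 0.5*c^2 to the graph error, so choosing c = sqrt(2*value) adds exactly `value`. A minimal sketch of that identity (the scalar 0.35 below is a hypothetical value, not from this PR):

```cpp
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/VectorValues.h>
#include <cassert>
#include <cmath>

int main() {
  using namespace gtsam;
  double value = 0.35;  // hypothetical scalar to fold into the error
  Vector c(1);
  c << std::sqrt(2.0 * value);
  JacobianFactor constantFactor(c);  // unary "constant" factor: empty A, b = [c]
  VectorValues x;                    // no variables to assign
  // error(x) = 0.5 * |b|^2 = 0.5 * (2 * value) = value
  assert(std::fabs(constantFactor.error(x) - value) < 1e-12);
  return 0;
}
```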


@@ -45,6 +45,15 @@ using GaussianFactorValuePair = std::pair<GaussianFactor::shared_ptr, double>;
* where the set of discrete variables indexes to
* the continuous gaussian distribution.
*
* In factor graphs the error function typically returns 0.5*|A*x - b|^2, i.e.,
* the negative log-likelihood for a Gaussian noise model.
* In hybrid factor graphs we allow *adding* an arbitrary scalar dependent on
* the discrete assignment.
* For example, adding a 70/30 mode probability is supported by providing the
* scalars $-log(.7)$ and $-log(.3)$.
* Note that adding a common constant will not make any difference in the
* optimization, so $-log(70)$ and $-log(30)$ work just as well.
*
* @ingroup hybrid
*/
class GTSAM_EXPORT HybridGaussianFactor : public HybridFactor {
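As a usage sketch of the doc comment above, assuming the pair-list constructor exercised by the new tests later in this PR (the factors `f0`/`f1` and all values are hypothetical):

```cpp
#include <gtsam/hybrid/HybridGaussianFactor.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/BetweenFactor.h>
#include <cmath>
#include <vector>

using namespace gtsam;
using symbol_shorthand::M;
using symbol_shorthand::X;

int main() {
  Values values;
  values.insert(X(0), 0.0);
  values.insert(X(1), 1.0);
  auto model = noiseModel::Isotropic::Sigma(1, 1.0);
  auto f0 = BetweenFactor<double>(X(0), X(1), 0.0, model).linearize(values);
  auto f1 = BetweenFactor<double>(X(0), X(1), 2.0, model).linearize(values);
  DiscreteKey m1(M(1), 2);
  // A 70/30 prior over the mode, encoded as the scalars -log(.7) and -log(.3):
  std::vector<GaussianFactorValuePair> pairs{{f0, -std::log(0.7)},
                                             {f1, -std::log(0.3)}};
  HybridGaussianFactor factor({X(0), X(1)}, m1, pairs);
  return 0;
}
```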


@@ -45,6 +45,17 @@ using NonlinearFactorValuePair = std::pair<NonlinearFactor::shared_ptr, double>;
* This class stores all factors as HybridFactors which can then be typecast to
* one of (NonlinearFactor, GaussianFactor) which can then be checked to perform
* the correct operation.
*
* In factor graphs the error function typically returns 0.5*|h(x)-z|^2, i.e.,
* the negative log-likelihood for a Gaussian noise model.
* In hybrid factor graphs we allow *adding* an arbitrary scalar dependent on
* the discrete assignment.
* For example, adding a 70/30 mode probability is supported by providing the
* scalars $-log(.7)$ and $-log(.3)$.
* Note that adding a common constant will not make any difference in the
* optimization, so $-log(70)$ and $-log(30)$ work just as well.
*
* @ingroup hybrid
*/
class HybridNonlinearFactor : public HybridFactor {
public:
@@ -134,7 +145,7 @@ class HybridNonlinearFactor : public HybridFactor {
auto errorFunc =
[continuousValues](const std::pair<sharedFactor, double>& f) {
auto [factor, val] = f;
- return factor->error(continuousValues) + (0.5 * val);
+ return factor->error(continuousValues) + val;
};
DecisionTree<Key, double> result(factors_, errorFunc);
return result;
@@ -153,7 +164,7 @@ class HybridNonlinearFactor : public HybridFactor {
auto [factor, val] = factors_(discreteValues);
// Compute the error for the selected factor
const double factorError = factor->error(continuousValues);
- return factorError + (0.5 * val);
+ return factorError + val;
}
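In equation form, the per-mode error computed above is

$$ E(x, m) \;=\; \underbrace{\tfrac{1}{2}\,\|h_m(x) - z\|^2_{\Sigma_m}}_{\texttt{factor->error}} \;+\; c_m, $$

where $c_m$ is the scalar paired with mode $m$'s factor. The `0.5 *` previously applied to the scalar is gone, so callers now supply $c_m$ exactly as it should enter the error.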
/**


@@ -36,9 +36,12 @@ HybridValues::HybridValues(const VectorValues& cv, const DiscreteValues& dv,
void HybridValues::print(const std::string& s,
const KeyFormatter& keyFormatter) const {
std::cout << s << ": \n";
- continuous_.print(" Continuous",
-                   keyFormatter); // print continuous components
- discrete_.print(" Discrete", keyFormatter); // print discrete components
+ // print continuous components
+ continuous_.print(" Continuous", keyFormatter);
+ // print discrete components
+ discrete_.print(" Discrete", keyFormatter);
+ // print nonlinear components
+ nonlinear_.print(" Nonlinear", keyFormatter);
}
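A usage sketch of the extended printout (all keys and values below are hypothetical):

```cpp
#include <gtsam/hybrid/HybridValues.h>
#include <gtsam/inference/Symbol.h>

using namespace gtsam;
using symbol_shorthand::M;
using symbol_shorthand::X;

int main() {
  VectorValues cv;
  cv.insert(X(0), Vector1(0.0));
  DiscreteValues dv{{M(1), 1}};
  Values nv;  // nonlinear values, now printed as well
  HybridValues hv(cv, dv, nv);
  hv.print("hv");  // prints Continuous, Discrete, and Nonlinear sections
  return 0;
}
```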
/* ************************************************************************* */


@@ -396,7 +396,7 @@ namespace test_two_state_estimation {
DiscreteKey m1(M(1), 2);
- void addMeasurement(HybridBayesNet& hbn, Key z_key, Key x_key, double sigma) {
+ void addMeasurement(HybridBayesNet &hbn, Key z_key, Key x_key, double sigma) {
auto measurement_model = noiseModel::Isotropic::Sigma(1, sigma);
hbn.emplace_shared<GaussianConditional>(z_key, Vector1(0.0), I_1x1, x_key,
-I_1x1, measurement_model);
@@ -420,7 +420,7 @@ static HybridGaussianConditional::shared_ptr CreateHybridMotionModel(
/// Create a two-state Bayes network with one or two measurement models
HybridBayesNet CreateBayesNet(
- const HybridGaussianConditional::shared_ptr& hybridMotionModel,
+ const HybridGaussianConditional::shared_ptr &hybridMotionModel,
bool add_second_measurement = false) {
HybridBayesNet hbn;
@@ -443,9 +443,9 @@ HybridBayesNet CreateBayesNet(
/// Approximate the discrete marginal P(m1) using importance sampling
std::pair<double, double> approximateDiscreteMarginal(
- const HybridBayesNet& hbn,
- const HybridGaussianConditional::shared_ptr& hybridMotionModel,
- const VectorValues& given, size_t N = 100000) {
+ const HybridBayesNet &hbn,
+ const HybridGaussianConditional::shared_ptr &hybridMotionModel,
+ const VectorValues &given, size_t N = 100000) {
/// Create importance sampling network q(x0,x1,m) = p(x1|x0,m1) q(x0) P(m1),
/// using q(x0) = N(z0, sigmaQ) to sample x0.
HybridBayesNet q;
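For reference, a helper like this presumably returns the standard self-normalized importance-sampling estimate (an assumption; only the proposal $q$ is spelled out in this excerpt): with samples $(x^{(i)}, m^{(i)}) \sim q$ and weights $w^{(i)} = p(z, x^{(i)}, m^{(i)}) / q(x^{(i)}, m^{(i)})$,

$$ \hat{P}(m_1 = k) \;=\; \frac{\sum_{i=1}^{N} w^{(i)}\, [m^{(i)} = k]}{\sum_{i=1}^{N} w^{(i)}}. $$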
@@ -741,6 +741,171 @@ TEST(HybridGaussianFactor, TwoStateModel4) {
EXPECT(assert_equal(expected, *(bn->at(2)->asDiscrete()), 0.002));
}
namespace test_direct_factor_graph {
/**
* @brief Create a Factor Graph by directly specifying all
* the factors instead of creating conditionals first.
* This way we can directly provide the likelihoods and
* then perform linearization.
*
* @param values Initial values to linearize around.
* @param means The means of the HybridGaussianFactor components.
* @param sigmas The standard deviations of the HybridGaussianFactor components.
* @param m1 The discrete key.
* @return HybridGaussianFactorGraph
*/
static HybridGaussianFactorGraph CreateFactorGraph(
const gtsam::Values &values, const std::vector<double> &means,
const std::vector<double> &sigmas, DiscreteKey &m1,
double measurement_noise = 1e-3) {
auto model0 = noiseModel::Isotropic::Sigma(1, sigmas[0]);
auto model1 = noiseModel::Isotropic::Sigma(1, sigmas[1]);
auto prior_noise = noiseModel::Isotropic::Sigma(1, measurement_noise);
auto f0 =
std::make_shared<BetweenFactor<double>>(X(0), X(1), means[0], model0)
->linearize(values);
auto f1 =
std::make_shared<BetweenFactor<double>>(X(0), X(1), means[1], model1)
->linearize(values);
// Create HybridGaussianFactor
// We take the negative since we want
// the underlying scalar to be log(\sqrt(|2πΣ|))
std::vector<GaussianFactorValuePair> factors{
{f0, -model0->logNormalizationConstant()},
{f1, -model1->logNormalizationConstant()}};
HybridGaussianFactor motionFactor({X(0), X(1)}, m1, factors);
HybridGaussianFactorGraph hfg;
hfg.push_back(motionFactor);
hfg.push_back(PriorFactor<double>(X(0), values.at<double>(X(0)), prior_noise)
.linearize(values));
return hfg;
}
} // namespace test_direct_factor_graph
/* ************************************************************************* */
/**
* @brief Test components with differing means but the same covariances.
* The factor graph is
* *-X1-*-X2
* |
* M1
*/
TEST(HybridGaussianFactor, DifferentMeansFG) {
using namespace test_direct_factor_graph;
DiscreteKey m1(M(1), 2);
Values values;
double x1 = 0.0, x2 = 1.75;
values.insert(X(0), x1);
values.insert(X(1), x2);
std::vector<double> means = {0.0, 2.0}, sigmas = {1e-0, 1e-0};
HybridGaussianFactorGraph hfg = CreateFactorGraph(values, means, sigmas, m1);
{
auto bn = hfg.eliminateSequential();
HybridValues actual = bn->optimize();
HybridValues expected(
VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(-1.75)}},
DiscreteValues{{M(1), 0}});
EXPECT(assert_equal(expected, actual));
DiscreteValues dv0{{M(1), 0}};
VectorValues cont0 = bn->optimize(dv0);
double error0 = bn->error(HybridValues(cont0, dv0));
// regression
EXPECT_DOUBLES_EQUAL(0.69314718056, error0, 1e-9);
DiscreteValues dv1{{M(1), 1}};
VectorValues cont1 = bn->optimize(dv1);
double error1 = bn->error(HybridValues(cont1, dv1));
EXPECT_DOUBLES_EQUAL(error0, error1, 1e-9);
}
{
auto prior_noise = noiseModel::Isotropic::Sigma(1, 1e-3);
hfg.push_back(
PriorFactor<double>(X(1), means[1], prior_noise).linearize(values));
auto bn = hfg.eliminateSequential();
HybridValues actual = bn->optimize();
HybridValues expected(
VectorValues{{X(0), Vector1(0.0)}, {X(1), Vector1(0.25)}},
DiscreteValues{{M(1), 1}});
EXPECT(assert_equal(expected, actual));
{
DiscreteValues dv{{M(1), 0}};
VectorValues cont = bn->optimize(dv);
double error = bn->error(HybridValues(cont, dv));
// regression
EXPECT_DOUBLES_EQUAL(2.12692448787, error, 1e-9);
}
{
DiscreteValues dv{{M(1), 1}};
VectorValues cont = bn->optimize(dv);
double error = bn->error(HybridValues(cont, dv));
// regression
EXPECT_DOUBLES_EQUAL(0.126928487854, error, 1e-9);
}
}
}
/* ************************************************************************* */
/**
* @brief Test components with differing covariances but the same means.
* The factor graph is
* *-X1-*-X2
* |
* M1
*/
TEST(HybridGaussianFactor, DifferentCovariancesFG) {
using namespace test_direct_factor_graph;
DiscreteKey m1(M(1), 2);
Values values;
double x1 = 1.0, x2 = 1.0;
values.insert(X(0), x1);
values.insert(X(1), x2);
std::vector<double> means = {0.0, 0.0}, sigmas = {1e2, 1e-2};
// Create FG with HybridGaussianFactor and prior on X1
HybridGaussianFactorGraph fg = CreateFactorGraph(values, means, sigmas, m1);
auto hbn = fg.eliminateSequential();
VectorValues cv;
cv.insert(X(0), Vector1(0.0));
cv.insert(X(1), Vector1(0.0));
// Check the error values at the MLE point μ.
AlgebraicDecisionTree<Key> errorTree = hbn->errorTree(cv);
DiscreteValues dv0{{M(1), 0}};
DiscreteValues dv1{{M(1), 1}};
// regression
EXPECT_DOUBLES_EQUAL(9.90348755254, errorTree(dv0), 1e-9);
EXPECT_DOUBLES_EQUAL(0.69314718056, errorTree(dv1), 1e-9);
DiscreteConditional expected_m1(m1, "0.5/0.5");
DiscreteConditional actual_m1 = *(hbn->at(2)->asDiscrete());
EXPECT(assert_equal(expected_m1, actual_m1));
}
/* ************************************************************************* */
int main() {
TestResult tr;


@@ -857,10 +857,10 @@ namespace test_relinearization {
*/
static HybridNonlinearFactorGraph CreateFactorGraph(
const std::vector<double> &means, const std::vector<double> &sigmas,
- DiscreteKey &m1, double x0_measurement) {
+ DiscreteKey &m1, double x0_measurement, double measurement_noise = 1e-3) {
auto model0 = noiseModel::Isotropic::Sigma(1, sigmas[0]);
auto model1 = noiseModel::Isotropic::Sigma(1, sigmas[1]);
- auto prior_noise = noiseModel::Isotropic::Sigma(1, 1e-3);
+ auto prior_noise = noiseModel::Isotropic::Sigma(1, measurement_noise);
auto f0 =
std::make_shared<BetweenFactor<double>>(X(0), X(1), means[0], model0);
@@ -868,10 +868,13 @@ static HybridNonlinearFactorGraph CreateFactorGraph(
std::make_shared<BetweenFactor<double>>(X(0), X(1), means[1], model1);
// Create HybridNonlinearFactor
+ // We take the negative since we want
+ // the underlying scalar to be log(\sqrt(|2πΣ|))
std::vector<NonlinearFactorValuePair> factors{
-     {f0, ComputeLogNormalizer(model0)}, {f1, ComputeLogNormalizer(model1)}};
+     {f0, -model0->logNormalizationConstant()},
+     {f1, -model1->logNormalizationConstant()}};
- HybridNonlinearFactor mixtureFactor({X(0), X(1)}, {m1}, factors);
+ HybridNonlinearFactor mixtureFactor({X(0), X(1)}, m1, factors);
HybridNonlinearFactorGraph hfg;
hfg.push_back(mixtureFactor);
@@ -968,7 +971,7 @@ TEST(HybridNonlinearFactorGraph, DifferentMeans) {
* |
* M1
*/
- TEST_DISABLED(HybridNonlinearFactorGraph, DifferentCovariances) {
+ TEST(HybridNonlinearFactorGraph, DifferentCovariances) {
using namespace test_relinearization;
DiscreteKey m1(M(1), 2);
@@ -982,8 +985,10 @@ TEST_DISABLED(HybridNonlinearFactorGraph, DifferentCovariances) {
// Create FG with HybridNonlinearFactor and prior on X1
HybridNonlinearFactorGraph hfg = CreateFactorGraph(means, sigmas, m1, x0);
- // Linearize and eliminate
- auto hbn = hfg.linearize(values)->eliminateSequential();
+ // Linearize
+ auto hgfg = hfg.linearize(values);
+ // and eliminate
+ auto hbn = hgfg->eliminateSequential();
VectorValues cv;
cv.insert(X(0), Vector1(0.0));
@@ -1005,6 +1010,52 @@ TEST_DISABLED(HybridNonlinearFactorGraph, DifferentCovariances) {
EXPECT(assert_equal(expected_m1, actual_m1));
}
TEST(HybridNonlinearFactorGraph, Relinearization) {
using namespace test_relinearization;
DiscreteKey m1(M(1), 2);
Values values;
double x0 = 0.0, x1 = 0.8;
values.insert(X(0), x0);
values.insert(X(1), x1);
std::vector<double> means = {0.0, 1.0}, sigmas = {1e-2, 1e-2};
double prior_sigma = 1e-2;
// Create FG with HybridNonlinearFactor and prior on X1
HybridNonlinearFactorGraph hfg =
CreateFactorGraph(means, sigmas, m1, 0.0, prior_sigma);
hfg.push_back(PriorFactor<double>(
X(1), 1.2, noiseModel::Isotropic::Sigma(1, prior_sigma)));
// Linearize
auto hgfg = hfg.linearize(values);
// and eliminate
auto hbn = hgfg->eliminateSequential();
HybridValues delta = hbn->optimize();
values = values.retract(delta.continuous());
Values expected_first_result;
expected_first_result.insert(X(0), 0.0666666666667);
expected_first_result.insert(X(1), 1.13333333333);
EXPECT(assert_equal(expected_first_result, values));
// Re-linearize
hgfg = hfg.linearize(values);
// and eliminate
hbn = hgfg->eliminateSequential();
delta = hbn->optimize();
HybridValues result(delta.continuous(), delta.discrete(),
values.retract(delta.continuous()));
HybridValues expected_result(
VectorValues{{X(0), Vector1(0)}, {X(1), Vector1(0)}},
DiscreteValues{{M(1), 1}}, expected_first_result);
EXPECT(assert_equal(expected_result, result));
}
/* ************************************************************************* */
int main() {


@@ -238,6 +238,35 @@ void Gaussian::WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const
Matrix Gaussian::information() const { return R().transpose() * R(); }
/* *******************************************************************************/
double Gaussian::logDetR() const {
double logDetR =
R().diagonal().unaryExpr([](double x) { return log(x); }).sum();
return logDetR;
}
/* *******************************************************************************/
double Gaussian::logDeterminant() const {
// Since noise models are Gaussian, we can get the logDeterminant easily
// Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1}
// log det(Sigma) = -log(det(R'R)) = -2*log(det(R))
// Hence, log det(Sigma) = -2.0 * logDetR()
return -2.0 * logDetR();
}
/* *******************************************************************************/
double Gaussian::logNormalizationConstant() const {
// log(det(Sigma)) = -2.0 * logDetR()
// which gives log(k) = -0.5*n*log(2*pi) - 0.5*log(det(Sigma))
//                    = -0.5*n*log(2*pi) + logDetR()
size_t n = dim();
constexpr double log2pi = 1.8378770664093454835606594728112;
// Return log(1/\sqrt(|2pi Sigma|)) = -0.5*log(|2pi Sigma|)
return -0.5 * n * log2pi + logDetR();
}
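Written out, with $\Sigma = (R^\top R)^{-1}$ and hence $\log|\Sigma| = -2\log|R|$:

$$ \log k \;=\; \log\frac{1}{\sqrt{|2\pi\Sigma|}} \;=\; -\tfrac{n}{2}\log(2\pi) - \tfrac{1}{2}\log|\Sigma| \;=\; -\tfrac{n}{2}\log(2\pi) + \log|R|, $$

which is exactly the `-0.5 * n * log2pi + logDetR()` returned above.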
/* ************************************************************************* */
// Diagonal
/* ************************************************************************* */
@@ -314,6 +343,11 @@ void Diagonal::WhitenInPlace(Eigen::Block<Matrix> H) const {
H = invsigmas().asDiagonal() * H;
}
/* *******************************************************************************/
double Diagonal::logDetR() const {
return invsigmas_.unaryExpr([](double x) { return log(x); }).sum();
}
/* ************************************************************************* */
// Constrained
/* ************************************************************************* */
@@ -642,6 +676,9 @@ void Isotropic::WhitenInPlace(Eigen::Block<Matrix> H) const {
H *= invsigma_;
}
/* *******************************************************************************/
double Isotropic::logDetR() const { return log(invsigma_) * dim(); }
/* ************************************************************************* */
// Unit
/* ************************************************************************* */
@@ -654,6 +691,9 @@ double Unit::squaredMahalanobisDistance(const Vector& v) const {
return v.dot(v);
}
/* *******************************************************************************/
double Unit::logDetR() const { return 0.0; }
/* ************************************************************************* */
// Robust
/* ************************************************************************* */
@@ -708,24 +748,4 @@ const RobustModel::shared_ptr &robust, const NoiseModel::shared_ptr noise){
/* ************************************************************************* */
} // namespace noiseModel
/* *******************************************************************************/
- double ComputeLogNormalizer(
-     const noiseModel::Gaussian::shared_ptr& noise_model) {
-   // Since noise models are Gaussian, we can get the logDeterminant using
-   // the same trick as in GaussianConditional
-   // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1}
-   // log det(Sigma) = -log(det(R'R)) = -2*log(det(R))
-   // Hence, log det(Sigma)) = -2.0 * logDetR()
-   double logDetR = noise_model->R()
-       .diagonal()
-       .unaryExpr([](double x) { return log(x); })
-       .sum();
-   double logDeterminantSigma = -2.0 * logDetR;
-   size_t n = noise_model->dim();
-   constexpr double log2pi = 1.8378770664093454835606594728112;
-   return n * log2pi + logDeterminantSigma;
- }
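Since the removed free function returned $\log|2\pi\Sigma|$ while the new member returns $\log k = -\tfrac{1}{2}\log|2\pi\Sigma|$, old call sites can migrate with a sign-and-scale change (a sketch, not part of this PR):

```cpp
#include <gtsam/linear/NoiseModel.h>

int main() {
  auto noise_model = gtsam::noiseModel::Isotropic::Sigma(3, 0.1);
  // Old: ComputeLogNormalizer(noise_model), i.e. log(|2πΣ|).
  // New equivalent, since logNormalizationConstant() = -0.5 * log(|2πΣ|):
  double logNormalizer = -2.0 * noise_model->logNormalizationConstant();
  (void)logNormalizer;
  return 0;
}
```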
} // gtsam


@@ -183,6 +183,8 @@ namespace gtsam {
return *sqrt_information_;
}
/// Compute the log of |R|. Used for computing log(|Σ|)
virtual double logDetR() const;
public:
@@ -266,7 +268,20 @@
/// Compute covariance matrix
virtual Matrix covariance() const;
private:
/// Compute the log of |Σ|
double logDeterminant() const;
/**
* @brief Method to compute the normalization constant
* for a Gaussian noise model k = \sqrt(1/|2πΣ|).
* We compute this in the log-space for numerical accuracy,
* thus returning log(k).
*
* @return double
*/
double logNormalizationConstant() const;
private:
#ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
/** Serialization function */
friend class boost::serialization::access;
@@ -295,11 +310,12 @@ namespace gtsam {
*/
Vector sigmas_, invsigmas_, precisions_;
protected:
/** constructor to allow for disabling initialization of invsigmas */
Diagonal(const Vector& sigmas);
/// Compute the log of |R|. Used for computing log(|Σ|)
virtual double logDetR() const override;
public:
/** constructor - no initializations, for serialization */
Diagonal();
@@ -532,6 +548,9 @@ namespace gtsam {
Isotropic(size_t dim, double sigma) :
Diagonal(Vector::Constant(dim, sigma)),sigma_(sigma),invsigma_(1.0/sigma) {}
/// Compute the log of |R|. Used for computing log(|Σ|)
virtual double logDetR() const override;
public:
/* dummy constructor to allow for serialization */
@@ -595,6 +614,10 @@ namespace gtsam {
* Unit: i.i.d. unit-variance noise on all m dimensions.
*/
class GTSAM_EXPORT Unit : public Isotropic {
protected:
/// Compute the log of |R|. Used for computing log(|Σ|)
virtual double logDetR() const override;
public:
typedef std::shared_ptr<Unit> shared_ptr;
@@ -751,18 +774,6 @@ namespace gtsam {
template<> struct traits<noiseModel::Isotropic> : public Testable<noiseModel::Isotropic> {};
template<> struct traits<noiseModel::Unit> : public Testable<noiseModel::Unit> {};
- /**
-  * @brief Helper function to compute the log(|2πΣ|) normalizer values
-  * for a Gaussian noise model.
-  * We compute this in the log-space for numerical accuracy.
-  *
-  * @param noise_model The Gaussian noise model
-  * whose normalizer we wish to compute.
-  * @return double
-  */
- GTSAM_EXPORT double ComputeLogNormalizer(
-     const noiseModel::Gaussian::shared_ptr& noise_model);
} //\ namespace gtsam


@@ -807,24 +807,81 @@ TEST(NoiseModel, NonDiagonalGaussian)
}
}
- TEST(NoiseModel, ComputeLogNormalizer) {
+ TEST(NoiseModel, LogNormalizationConstant1D) {
// Very simple 1D noise model, which we can compute by hand.
double sigma = 0.1;
- auto noise_model = Isotropic::Sigma(1, sigma);
- double actual_value = ComputeLogNormalizer(noise_model);
- // Compute log(|2πΣ|) by hand.
- // = log(2π) + log(Σ) (since it is 1D)
- constexpr double log2pi = 1.8378770664093454835606594728112;
- double expected_value = log2pi + log(sigma * sigma);
- EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
+ // For expected values, we compute log(1/sqrt(|2πΣ|)) by hand.
+ // = -0.5*(log(2π) + log(Σ)) (since it is 1D)
+ double expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
- // Similar situation in the 3D case
+ // Gaussian
{
Matrix11 R;
R << 1 / sigma;
auto noise_model = Gaussian::SqrtInformation(R);
double actual_value = noise_model->logNormalizationConstant();
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
// Diagonal
{
auto noise_model = Diagonal::Sigmas(Vector1(sigma));
double actual_value = noise_model->logNormalizationConstant();
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
// Isotropic
{
auto noise_model = Isotropic::Sigma(1, sigma);
double actual_value = noise_model->logNormalizationConstant();
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
// Unit
{
auto noise_model = Unit::Create(1);
double actual_value = noise_model->logNormalizationConstant();
double sigma = 1.0;
expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
}
TEST(NoiseModel, LogNormalizationConstant3D) {
// Simple 3D noise model, which we can compute by hand.
double sigma = 0.1;
size_t n = 3;
- auto noise_model2 = Isotropic::Sigma(n, sigma);
- double actual_value2 = ComputeLogNormalizer(noise_model2);
- // We multiply by 3 due to the determinant
- double expected_value2 = n * (log2pi + log(sigma * sigma));
- EXPECT_DOUBLES_EQUAL(expected_value2, actual_value2, 1e-9);
+ // We compute the expected values just like in the LogNormalizationConstant1D
+ // test, but we multiply by 3 due to the determinant.
+ double expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
// Gaussian
{
Matrix33 R;
R << 1 / sigma, 2, 3, //
0, 1 / sigma, 4, //
0, 0, 1 / sigma;
auto noise_model = Gaussian::SqrtInformation(R);
double actual_value = noise_model->logNormalizationConstant();
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
// Diagonal
{
auto noise_model = Diagonal::Sigmas(Vector3(sigma, sigma, sigma));
double actual_value = noise_model->logNormalizationConstant();
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
// Isotropic
{
auto noise_model = Isotropic::Sigma(n, sigma);
double actual_value = noise_model->logNormalizationConstant();
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
// Unit
{
auto noise_model = Unit::Create(3);
double actual_value = noise_model->logNormalizationConstant();
double sigma = 1.0;
expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
}
}
/* ************************************************************************* */