Merge pull request #1839 from borglab/improved-api-3
commit fd7df61d45
@@ -465,6 +465,12 @@ string DiscreteConditional::html(const KeyFormatter& keyFormatter,
 double DiscreteConditional::evaluate(const HybridValues& x) const {
   return this->evaluate(x.discrete());
 }
 
 /* ************************************************************************* */
+double DiscreteConditional::negLogConstant() const {
+  return 0.0;
+}
+
+/* ************************************************************************* */
+
 }  // namespace gtsam
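A DiscreteConditional is a normalized probability table, so its constant k is 1 and negLogConstant() = -log(1) = 0; the general invariant logProbability(x) = -(negLogConstant() + error(x)) then collapses to logProbability = -error. A minimal standalone check of that identity (plain C++ with made-up probabilities, not the GTSAM types):

#include <cassert>
#include <cmath>
#include <vector>

int main() {
  // A normalized discrete conditional P(x | parents fixed): entries sum to 1.
  const std::vector<double> p = {0.2, 0.5, 0.3};

  const double negLogConstant = 0.0;  // k == 1 for a normalized table
  for (double px : p) {
    const double error = -std::log(px);  // error(x) = -log P(x)
    const double logProbability = -(negLogConstant + error);
    assert(std::abs(logProbability - std::log(px)) < 1e-12);
  }
  return 0;
}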
@@ -264,11 +264,12 @@ class GTSAM_EXPORT DiscreteConditional
   }
 
   /**
-   * logNormalizationConstant K is just zero, such that
-   * logProbability(x) = log(evaluate(x)) = - error(x)
-   * and hence error(x) = - log(evaluate(x)) > 0 for all x.
+   * negLogConstant is just zero, such that
+   * -logProbability(x) = -log(evaluate(x)) = error(x)
+   * and hence error(x) > 0 for all x.
+   * Thus -log(k) for the normalization constant k is 0.
    */
-  double logNormalizationConstant() const override { return 0.0; }
+  double negLogConstant() const override;
 
   /// @}
@@ -112,7 +112,7 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor {
                       const std::vector<double>& table);
 
   // Standard interface
-  double logNormalizationConstant() const;
+  double negLogConstant() const;
   double logProbability(const gtsam::DiscreteValues& values) const;
   double evaluate(const gtsam::DiscreteValues& values) const;
   double error(const gtsam::DiscreteValues& values) const;
@@ -161,18 +161,18 @@ double HybridConditional::logProbability(const HybridValues &values) const {
 }
 
 /* ************************************************************************ */
-double HybridConditional::logNormalizationConstant() const {
+double HybridConditional::negLogConstant() const {
   if (auto gc = asGaussian()) {
-    return gc->logNormalizationConstant();
+    return gc->negLogConstant();
   }
   if (auto gm = asHybrid()) {
-    return gm->logNormalizationConstant();  // 0.0!
+    return gm->negLogConstant();  // 0.0!
   }
   if (auto dc = asDiscrete()) {
-    return dc->logNormalizationConstant();  // 0.0!
+    return dc->negLogConstant();  // 0.0!
   }
   throw std::runtime_error(
-      "HybridConditional::logProbability: conditional type not handled");
+      "HybridConditional::negLogConstant: conditional type not handled");
 }
 
 /* ************************************************************************ */
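The chain of as*() checks above is the usual shared-pointer downcast dispatch: each helper presumably wraps a std::dynamic_pointer_cast of the stored inner conditional and returns null on a type mismatch, so control falls through to the throw. A self-contained sketch of the idiom with hypothetical stand-in types (not the GTSAM classes):

#include <cstdio>
#include <memory>
#include <stdexcept>

struct Base { virtual ~Base() = default; };
struct GaussianLike : Base { double negLogConstant() const { return 0.92; } };
struct DiscreteLike : Base { double negLogConstant() const { return 0.0; } };

// Dispatch on the dynamic type of the stored inner conditional, mirroring
// the shape of HybridConditional::negLogConstant above.
double negLogConstant(const std::shared_ptr<Base>& inner) {
  if (auto gc = std::dynamic_pointer_cast<GaussianLike>(inner))
    return gc->negLogConstant();
  if (auto dc = std::dynamic_pointer_cast<DiscreteLike>(inner))
    return dc->negLogConstant();
  throw std::runtime_error("negLogConstant: conditional type not handled");
}

int main() {
  std::printf("%f\n", negLogConstant(std::make_shared<GaussianLike>()));
  std::printf("%f\n", negLogConstant(std::make_shared<DiscreteLike>()));
  return 0;
}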
@@ -193,11 +193,12 @@ class GTSAM_EXPORT HybridConditional
   double logProbability(const HybridValues& values) const override;
 
   /**
-   * Return the log normalization constant.
+   * Return the negative log of the normalization constant.
+   * This shows up in logProbability as -(error(x) + negLogConstant).
    * Note this is 0.0 for discrete and hybrid conditionals, but depends
    * on the continuous parameters for Gaussian conditionals.
    */
-  double logNormalizationConstant() const override;
+  double negLogConstant() const override;
 
   /// Return the probability (or density) of the underlying conditional.
   double evaluate(const HybridValues& values) const override;
@@ -36,7 +36,7 @@ HybridGaussianFactor::FactorValuePairs GetFactorValuePairs(
     // Check if conditional is pruned
     if (conditional) {
      // Assign log(\sqrt(|2πΣ|)) = -log(1 / sqrt(|2πΣ|))
-      value = -conditional->logNormalizationConstant();
+      value = conditional->negLogConstant();
     }
     return {std::dynamic_pointer_cast<GaussianFactor>(conditional), value};
   };
@@ -51,14 +51,14 @@ HybridGaussianConditional::HybridGaussianConditional(
           discreteParents, GetFactorValuePairs(conditionals)),
       BaseConditional(continuousFrontals.size()),
       conditionals_(conditionals) {
-  // Calculate logConstant_ as the minimum of the log normalizers of the
-  // conditionals, by visiting the decision tree:
-  logConstant_ = std::numeric_limits<double>::infinity();
+  // Calculate negLogConstant_ as the minimum of the negative-log normalizers
+  // of the conditionals, by visiting the decision tree:
+  negLogConstant_ = std::numeric_limits<double>::infinity();
   conditionals_.visit(
       [this](const GaussianConditional::shared_ptr &conditional) {
         if (conditional) {
-          this->logConstant_ = std::min(
-              this->logConstant_, -conditional->logNormalizationConstant());
+          this->negLogConstant_ =
+              std::min(this->negLogConstant_, conditional->negLogConstant());
         }
       });
 }
@@ -84,8 +84,7 @@ GaussianFactorGraphTree HybridGaussianConditional::asGaussianFactorGraphTree()
   auto wrap = [this](const GaussianConditional::shared_ptr &gc) {
     // First check if conditional has not been pruned
     if (gc) {
-      const double Cgm_Kgcm =
-          -this->logConstant_ - gc->logNormalizationConstant();
+      const double Cgm_Kgcm = gc->negLogConstant() - this->negLogConstant_;
       // If there is a difference in the covariances, we need to account for
       // that since the error is dependent on the mode.
       if (Cgm_Kgcm > 0.0) {
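Because negLogConstant_ is the minimum over all modes, the per-mode correction Cgm_Kgcm = gc->negLogConstant() - negLogConstant_ is non-negative by construction, which is what allows it to be folded into a least-squares term. A standalone sketch of that bookkeeping, with made-up per-mode sigmas:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical per-mode standard deviations of 1-D Gaussian components.
  const std::vector<double> sigmas = {0.5, 1.0, 2.0};

  // Per-mode -log(k_m) = 0.5*log(2*pi*sigma^2) for a 1-D Gaussian.
  std::vector<double> negLogK;
  for (double s : sigmas) negLogK.push_back(0.5 * std::log(2 * M_PI * s * s));

  const double minNegLogK = *std::min_element(negLogK.begin(), negLogK.end());
  for (double c : negLogK) {
    const double Cgm_Kgcm = c - minNegLogK;  // always >= 0 by construction
    std::printf("correction = %.6f\n", Cgm_Kgcm);
  }
  return 0;
}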
@@ -156,8 +155,7 @@ void HybridGaussianConditional::print(const std::string &s,
     std::cout << "(" << formatter(dk.first) << ", " << dk.second << "), ";
   }
   std::cout << std::endl
-            << " logNormalizationConstant: " << logNormalizationConstant()
-            << std::endl
+            << " logNormalizationConstant: " << -negLogConstant() << std::endl
             << std::endl;
   conditionals_.print(
       "", [&](Key k) { return formatter(k); },
@@ -215,8 +213,7 @@ std::shared_ptr<HybridGaussianFactor> HybridGaussianConditional::likelihood(
       [&](const GaussianConditional::shared_ptr &conditional)
           -> GaussianFactorValuePair {
         const auto likelihood_m = conditional->likelihood(given);
-        const double Cgm_Kgcm =
-            -logConstant_ - conditional->logNormalizationConstant();
+        const double Cgm_Kgcm = conditional->negLogConstant() - negLogConstant_;
         if (Cgm_Kgcm == 0.0) {
           return {likelihood_m, 0.0};
         } else {
@@ -66,7 +66,7 @@ class GTSAM_EXPORT HybridGaussianConditional
   Conditionals conditionals_;  ///< a decision tree of Gaussian conditionals.
-  double logConstant_;
+  ///< Negative-log of the normalization constant (log(\sqrt(|2πΣ|))).
+  ///< Take advantage of the neg-log space so everything is a minimization.
+  double negLogConstant_;
 
   /**
    * @brief Convert a HybridGaussianConditional of conditionals into
@@ -150,9 +150,15 @@ class GTSAM_EXPORT HybridGaussianConditional
   /// Returns the continuous keys among the parents.
   KeyVector continuousParents() const;
 
-  /// The log normalization constant is max of the the individual
-  /// log-normalization constants.
-  double logNormalizationConstant() const override { return -logConstant_; }
+  /**
+   * @brief Return the log normalization constant in negative log space.
+   *
+   * The returned value is the minimum of the individual
+   * negative log-normalization constants.
+   *
+   * @return double
+   */
+  inline double negLogConstant() const override { return negLogConstant_; }
 
   /**
    * Create a likelihood factor for a hybrid Gaussian conditional,
@@ -233,18 +233,18 @@ continuousElimination(const HybridGaussianFactorGraph &factors,
 
 /* ************************************************************************ */
 /**
- * @brief Exponentiate log-values, not necessarily normalized, normalize, and
- * return as AlgebraicDecisionTree<Key>.
+ * @brief Exponentiate (not necessarily normalized) negative log-values,
+ * normalize, and then return as AlgebraicDecisionTree<Key>.
  *
  * @param logValues DecisionTree of (unnormalized) log values.
  * @return AlgebraicDecisionTree<Key>
  */
-static AlgebraicDecisionTree<Key> probabilitiesFromLogValues(
+static AlgebraicDecisionTree<Key> probabilitiesFromNegativeLogValues(
     const AlgebraicDecisionTree<Key> &logValues) {
   // Perform normalization
-  double max_log = logValues.max();
+  double min_log = logValues.min();
   AlgebraicDecisionTree<Key> probabilities = DecisionTree<Key, double>(
-      logValues, [&max_log](const double x) { return exp(x - max_log); });
+      logValues, [&min_log](const double x) { return exp(-(x - min_log)); });
   probabilities = probabilities.normalize(probabilities.sum());
 
   return probabilities;
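The renamed helper is the standard numerically stable exp-normalize, shifted by the minimum in negative-log space so the most probable assignment exponentiates to exp(0) = 1 and nothing underflows. A self-contained sketch of the same technique over a plain std::vector (standing in for the AlgebraicDecisionTree):

#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>

// Convert unnormalized negative log-values into normalized probabilities,
// shifting by the minimum so the best value exponentiates to exp(0) = 1.
std::vector<double> probabilitiesFromNegLogValues(
    const std::vector<double>& negLog) {
  const double minNegLog = *std::min_element(negLog.begin(), negLog.end());
  std::vector<double> probs;
  probs.reserve(negLog.size());
  for (double v : negLog) probs.push_back(std::exp(-(v - minNegLog)));
  const double sum = std::accumulate(probs.begin(), probs.end(), 0.0);
  for (double& p : probs) p /= sum;  // now sums to 1
  return probs;
}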
@@ -265,13 +265,13 @@ discreteElimination(const HybridGaussianFactorGraph &factors,
       auto logProbability =
           [&](const GaussianFactor::shared_ptr &factor) -> double {
         if (!factor) return 0.0;
-        return -factor->error(VectorValues());
+        return factor->error(VectorValues());
       };
       AlgebraicDecisionTree<Key> logProbabilities =
           DecisionTree<Key, double>(gmf->factors(), logProbability);
 
       AlgebraicDecisionTree<Key> probabilities =
-          probabilitiesFromLogValues(logProbabilities);
+          probabilitiesFromNegativeLogValues(logProbabilities);
       dfg.emplace_shared<DecisionTreeFactor>(gmf->discreteKeys(),
                                              probabilities);
@@ -321,23 +321,23 @@ using Result = std::pair<std::shared_ptr<GaussianConditional>,
 static std::shared_ptr<Factor> createDiscreteFactor(
     const DecisionTree<Key, Result> &eliminationResults,
     const DiscreteKeys &discreteSeparator) {
-  auto logProbability = [&](const Result &pair) -> double {
+  auto negLogProbability = [&](const Result &pair) -> double {
     const auto &[conditional, factor] = pair;
     static const VectorValues kEmpty;
     // If the factor is not null, it has no keys, just contains the residual.
     if (!factor) return 1.0;  // TODO(dellaert): not loving this.
 
-    // Logspace version of:
+    // Negative logspace version of:
     // exp(-factor->error(kEmpty)) / conditional->normalizationConstant();
-    // We take negative of the logNormalizationConstant `log(k)`
-    // to get `log(1/k) = log(\sqrt{|2πΣ|})`.
-    return -factor->error(kEmpty) - conditional->logNormalizationConstant();
+    // negLogConstant gives `-log(k)`
+    // which is `-log(k) = log(1/k) = log(\sqrt{|2πΣ|})`.
+    return factor->error(kEmpty) - conditional->negLogConstant();
   };
 
-  AlgebraicDecisionTree<Key> logProbabilities(
-      DecisionTree<Key, double>(eliminationResults, logProbability));
+  AlgebraicDecisionTree<Key> negLogProbabilities(
+      DecisionTree<Key, double>(eliminationResults, negLogProbability));
   AlgebraicDecisionTree<Key> probabilities =
-      probabilitiesFromLogValues(logProbabilities);
+      probabilitiesFromNegativeLogValues(negLogProbabilities);
 
   return std::make_shared<DecisionTreeFactor>(discreteSeparator, probabilities);
 }
@@ -355,8 +355,9 @@ static std::shared_ptr<Factor> createHybridGaussianFactor(
       auto hf = std::dynamic_pointer_cast<HessianFactor>(factor);
       if (!hf) throw std::runtime_error("Expected HessianFactor!");
       // Add 2.0 term since the constant term will be premultiplied by 0.5
-      // as per the Hessian definition
-      hf->constantTerm() += 2.0 * conditional->logNormalizationConstant();
+      // as per the Hessian definition,
+      // and negative since we want log(k)
+      hf->constantTerm() += -2.0 * conditional->negLogConstant();
     }
     return {factor, 0.0};
   };
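The ±2.0 factors follow from the HessianFactor error convention, roughly error(x) = 0.5 x'Gx - x'g + 0.5 f, so adding 2c to the constant term f shifts the error by c; here c = -negLogConstant() = log(k). In LaTeX:

\mathrm{error}(x) = \tfrac{1}{2}\,x^\top G x - x^\top g + \tfrac{1}{2}\,f,
\qquad
\tfrac{1}{2}\,(f + 2c) = \tfrac{1}{2}\,f + c,
\qquad
c = -\,\mathrm{negLogConstant} = \log k .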
@@ -61,7 +61,7 @@ virtual class HybridConditional {
   size_t nrParents() const;
 
   // Standard interface:
-  double logNormalizationConstant() const;
+  double negLogConstant() const;
   double logProbability(const gtsam::HybridValues& values) const;
   double evaluate(const gtsam::HybridValues& values) const;
   double operator()(const gtsam::HybridValues& values) const;
@@ -61,11 +61,11 @@ const HybridGaussianConditional hybrid_conditional({Z(0)}, {X(0)}, mode,
 TEST(HybridGaussianConditional, Invariants) {
   using namespace equal_constants;
 
-  // Check that the conditional normalization constant is the max of all
-  // constants which are all equal, in this case, hence:
-  const double K = hybrid_conditional.logNormalizationConstant();
-  EXPECT_DOUBLES_EQUAL(K, conditionals[0]->logNormalizationConstant(), 1e-8);
-  EXPECT_DOUBLES_EQUAL(K, conditionals[1]->logNormalizationConstant(), 1e-8);
+  // Check that the conditional (negative log) normalization constant is the
+  // min of all constants, which are all equal in this case, hence:
+  const double K = hybrid_conditional.negLogConstant();
+  EXPECT_DOUBLES_EQUAL(K, conditionals[0]->negLogConstant(), 1e-8);
+  EXPECT_DOUBLES_EQUAL(K, conditionals[1]->negLogConstant(), 1e-8);
 
   EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv0));
   EXPECT(HybridGaussianConditional::CheckInvariants(hybrid_conditional, hv1));
@@ -180,15 +180,16 @@ TEST(HybridGaussianConditional, Error2) {
 
   // Check result.
   DiscreteKeys discrete_keys{mode};
-  double logNormalizer0 = -conditionals[0]->logNormalizationConstant();
-  double logNormalizer1 = -conditionals[1]->logNormalizationConstant();
-  double minLogNormalizer = std::min(logNormalizer0, logNormalizer1);
+  double negLogConstant0 = conditionals[0]->negLogConstant();
+  double negLogConstant1 = conditionals[1]->negLogConstant();
+  double minErrorConstant = std::min(negLogConstant0, negLogConstant1);
 
-  // Expected error is e(X) + log(|2πΣ|).
-  // We normalize log(|2πΣ|) with min(logNormalizers) so it is non-negative.
+  // Expected error is e(X) + log(sqrt(|2πΣ|)).
+  // We normalize log(sqrt(|2πΣ|)) with min(negLogConstant)
+  // so it is non-negative.
   std::vector<double> leaves = {
-      conditionals[0]->error(vv) + logNormalizer0 - minLogNormalizer,
-      conditionals[1]->error(vv) + logNormalizer1 - minLogNormalizer};
+      conditionals[0]->error(vv) + negLogConstant0 - minErrorConstant,
+      conditionals[1]->error(vv) + negLogConstant1 - minErrorConstant};
   AlgebraicDecisionTree<Key> expected(discrete_keys, leaves);
 
   EXPECT(assert_equal(expected, actual, 1e-6));
@@ -196,9 +197,9 @@ TEST(HybridGaussianConditional, Error2) {
   // Check for non-tree version.
   for (size_t mode : {0, 1}) {
     const HybridValues hv{vv, {{M(0), mode}}};
-    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) -
-                             conditionals[mode]->logNormalizationConstant() -
-                             minLogNormalizer,
+    EXPECT_DOUBLES_EQUAL(conditionals[mode]->error(vv) +
+                             conditionals[mode]->negLogConstant() -
+                             minErrorConstant,
                          hybrid_conditional.error(hv), 1e-8);
   }
 }
@@ -230,8 +231,8 @@ TEST(HybridGaussianConditional, Likelihood2) {
   CHECK(jf1->rows() == 2);
 
   // Check that the constant C1 is properly encoded in the JacobianFactor.
-  const double C1 = hybrid_conditional.logNormalizationConstant() -
-                    conditionals[1]->logNormalizationConstant();
+  const double C1 =
+      conditionals[1]->negLogConstant() - hybrid_conditional.negLogConstant();
   const double c1 = std::sqrt(2.0 * C1);
   Vector expected_unwhitened(2);
   expected_unwhitened << 4.9 - 5.0, -c1;
@@ -780,9 +780,8 @@ static HybridGaussianFactorGraph CreateFactorGraph(
   // Create HybridGaussianFactor
-  // We take negative since we want
-  // the underlying scalar to be log(\sqrt(|2πΣ|))
-  std::vector<GaussianFactorValuePair> factors{
-      {f0, -model0->logNormalizationConstant()},
-      {f1, -model1->logNormalizationConstant()}};
+  std::vector<GaussianFactorValuePair> factors{{f0, model0->negLogConstant()},
+                                               {f1, model1->negLogConstant()}};
   HybridGaussianFactor motionFactor({X(0), X(1)}, m1, factors);
 
   HybridGaussianFactorGraph hfg;
@@ -902,9 +902,8 @@ static HybridNonlinearFactorGraph CreateFactorGraph(
   // Create HybridNonlinearFactor
-  // We take negative since we want
-  // the underlying scalar to be log(\sqrt(|2πΣ|))
-  std::vector<NonlinearFactorValuePair> factors{
-      {f0, -model0->logNormalizationConstant()},
-      {f1, -model1->logNormalizationConstant()}};
+  std::vector<NonlinearFactorValuePair> factors{{f0, model0->negLogConstant()},
+                                                {f1, model1->negLogConstant()}};
 
   HybridNonlinearFactor mixtureFactor({X(0), X(1)}, m1, factors);
 
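In both test helpers, pairing each factor with its noise model's negLogConstant keeps the per-mode unnormalized negative log-densities comparable when model0 and model1 have different covariances; dropping the constant would bias mode selection toward the wider model. In symbols:

-\log p_m(x) \;=\;
\underbrace{\tfrac{1}{2}\,\lVert r_m(x)\rVert^2_{\Sigma_m}}_{\mathrm{error}_m(x)}
\;+\;
\underbrace{\log\sqrt{\lvert 2\pi\Sigma_m\rvert}}_{\mathrm{negLogConstant}_m}.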
@@ -59,16 +59,8 @@ double Conditional<FACTOR, DERIVEDCONDITIONAL>::evaluate(
 
 /* ************************************************************************* */
 template <class FACTOR, class DERIVEDCONDITIONAL>
-double Conditional<FACTOR, DERIVEDCONDITIONAL>::logNormalizationConstant()
-    const {
-  throw std::runtime_error(
-      "Conditional::logNormalizationConstant is not implemented");
-}
-
-/* ************************************************************************* */
-template <class FACTOR, class DERIVEDCONDITIONAL>
-double Conditional<FACTOR, DERIVEDCONDITIONAL>::normalizationConstant() const {
-  return std::exp(logNormalizationConstant());
+double Conditional<FACTOR, DERIVEDCONDITIONAL>::negLogConstant() const {
+  throw std::runtime_error("Conditional::negLogConstant is not implemented");
 }
 
 /* ************************************************************************* */
@@ -83,13 +75,9 @@ bool Conditional<FACTOR, DERIVEDCONDITIONAL>::CheckInvariants(
   const double logProb = conditional.logProbability(values);
   if (std::abs(prob_or_density - std::exp(logProb)) > 1e-9)
     return false;  // logProb is not consistent with prob_or_density
-  if (std::abs(conditional.logNormalizationConstant() -
-               std::log(conditional.normalizationConstant())) > 1e-9)
-    return false;  // log normalization constant is not consistent with
-                   // normalization constant
   const double error = conditional.error(values);
   if (error < 0.0) return false;  // prob_or_density is negative.
-  const double expected = conditional.logNormalizationConstant() - error;
+  const double expected = -(conditional.negLogConstant() + error);
   if (std::abs(logProb - expected) > 1e-9)
     return false;  // logProb is not consistent with error
   return true;
@@ -34,11 +34,13 @@ namespace gtsam {
  * `logProbability` is the main methods that need to be implemented in derived
  * classes. These two methods relate to the `error` method in the factor by:
  * probability(x) = k exp(-error(x))
- * where k is a normalization constant making \int probability(x) == 1.0, and
- * logProbability(x) = K - error(x)
- * i.e., K = log(K). The normalization constant K is assumed to *not* depend
+ * where k is a normalization constant making
+ * \int probability(x) = \int k exp(-error(x)) == 1.0, and
+ * logProbability(x) = -(K + error(x))
+ * i.e., K = -log(k). The normalization constant k is assumed to *not* depend
  * on any argument, only (possibly) on the conditional parameters.
- * This class provides a default logNormalizationConstant() == 0.0.
+ * This class provides a default negative log normalization constant
+ * negLogConstant() == 0.0.
  *
  * There are four broad classes of conditionals that derive from Conditional:
 *
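The docstring's relationships, written out as one chain with K = -log(k):

p(x) = k\,e^{-\mathrm{error}(x)},
\qquad
\log p(x) = \log k - \mathrm{error}(x) = -\bigl(K + \mathrm{error}(x)\bigr),
\qquad
K = -\log k .

Since error(x) >= 0, no probability (or density) ever exceeds the peak value k.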
@@ -163,13 +165,12 @@ namespace gtsam {
     }
 
     /**
-     * All conditional types need to implement a log normalization constant to
-     * make it such that error>=0.
+     * @brief All conditional types need to implement this as the negative log
+     * of the normalization constant to make it such that error>=0.
+     *
+     * @return double
      */
-    virtual double logNormalizationConstant() const;
-
-    /** Non-virtual, exponentiate logNormalizationConstant. */
-    double normalizationConstant() const;
+    virtual double negLogConstant() const;
 
     /// @}
     /// @name Advanced Interface
@@ -208,9 +209,9 @@ namespace gtsam {
      * - evaluate >= 0.0
      * - evaluate(x) == conditional(x)
      * - exp(logProbability(x)) == evaluate(x)
-     * - logNormalizationConstant() = log(normalizationConstant())
+     * - negLogConstant() = -log(normalizationConstant())
      * - error >= 0.0
-     * - logProbability(x) == logNormalizationConstant() - error(x)
+     * - logProbability(x) == -(negLogConstant() + error(x))
      *
      * @param conditional The conditional to test, as a reference to the derived type.
      * @tparam VALUES HybridValues, or a more narrow type like DiscreteValues.
@@ -243,23 +243,24 @@ namespace gtsam {
 }
 
 /* ************************************************************************* */
-double GaussianBayesNet::logNormalizationConstant() const {
+double GaussianBayesNet::negLogConstant() const {
   /*
     normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-    logConstant = -0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
+    negLogConstant = -log(normalizationConstant)
+                   = 0.5 * n*log(2*pi) + 0.5 * log(det(Sigma))
 
-    log det(Sigma)) = -2.0 * logDeterminant()
-    thus, logConstant = -0.5*n*log(2*pi) + logDeterminant()
+    log(det(Sigma)) = -2.0 * logDeterminant()
+    thus, negLogConstant = 0.5*n*log(2*pi) - logDeterminant()
 
-    BayesNet logConstant = sum(-0.5*n_i*log(2*pi) + logDeterminant_i())
-                         = sum(-0.5*n_i*log(2*pi)) + sum(logDeterminant_i())
-                         = sum(-0.5*n_i*log(2*pi)) + bn->logDeterminant()
+    BayesNet negLogConstant = sum(0.5*n_i*log(2*pi) - logDeterminant_i())
+                            = sum(0.5*n_i*log(2*pi)) - sum(logDeterminant_i())
+                            = sum(0.5*n_i*log(2*pi)) - bn->logDeterminant()
   */
-  double logNormConst = 0.0;
+  double negLogNormConst = 0.0;
   for (const sharedConditional& cg : *this) {
-    logNormConst += cg->logNormalizationConstant();
+    negLogNormConst += cg->negLogConstant();
   }
-  return logNormConst;
+  return negLogNormConst;
 }
 
 /* ************************************************************************* */
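The loop is the product rule in negative-log space: the joint density of a Bayes net factorizes over its conditionals, so the constants multiply and their negative logs add:

p(x) = \prod_i p_i(x_i \mid \pi_i)
\;\Longrightarrow\;
-\log k = \sum_i \bigl(-\log k_i\bigr)
        = \sum_i \Bigl(\tfrac{n_i}{2}\log(2\pi) - \log\det R_i\Bigr).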
@@ -235,12 +235,12 @@ namespace gtsam {
     double logDeterminant() const;
 
     /**
-     * @brief Get the log of the normalization constant corresponding to the
-     * joint Gaussian density represented by this Bayes net.
+     * @brief Get the negative log of the normalization constant corresponding
+     * to the joint Gaussian density represented by this Bayes net.
      *
      * @return double
      */
-    double logNormalizationConstant() const;
+    double negLogConstant() const;
 
     /**
      * Backsubstitute with a different RHS vector than the one stored in this BayesNet.
@@ -121,7 +121,7 @@ namespace gtsam {
       const auto mean = solve({});  // solve for mean.
       mean.print("  mean", formatter);
     }
-    cout << "  logNormalizationConstant: " << logNormalizationConstant() << endl;
+    cout << "  logNormalizationConstant: " << -negLogConstant() << endl;
     if (model_)
       model_->print("  Noise model: ");
     else
@@ -181,24 +181,24 @@ namespace gtsam {
 
 /* ************************************************************************* */
 // normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-// log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
-double GaussianConditional::logNormalizationConstant() const {
+// neg-log = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma)
+double GaussianConditional::negLogConstant() const {
   constexpr double log2pi = 1.8378770664093454835606594728112;
   size_t n = d().size();
   // Sigma = (R'R)^{-1}, det(Sigma) = det((R'R)^{-1}) = det(R'R)^{-1}
   // log det(Sigma) = -log(det(R'R)) = -2*log(det(R))
   // Hence, log(det(Sigma)) = -2.0 * logDeterminant()
-  // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDeterminant())
-  //                  = -0.5*n*log(2*pi) + (0.5*2.0 * logDeterminant())
-  //                  = -0.5*n*log(2*pi) + logDeterminant()
-  return -0.5 * n * log2pi + logDeterminant();
+  // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDeterminant())
+  //                     = 0.5*n*log(2*pi) - (0.5*2.0 * logDeterminant())
+  //                     = 0.5*n*log(2*pi) - logDeterminant()
+  return 0.5 * n * log2pi - logDeterminant();
 }
 
 /* ************************************************************************* */
 // density = k exp(-error(x))
 // log = log(k) - error(x)
 double GaussianConditional::logProbability(const VectorValues& x) const {
-  return logNormalizationConstant() - error(x);
+  return -(negLogConstant() + error(x));
 }
 
 double GaussianConditional::logProbability(const HybridValues& x) const {
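A quick standalone sanity check of the closed form, for an isotropic n-dimensional Gaussian with R = I/σ (so log det(R) = -n log σ); the values of σ and n are arbitrary illustration choices:

#include <cassert>
#include <cmath>

int main() {
  const double sigma = 2.0;
  const int n = 3;

  // negLogConstant via the implementation's form: 0.5*n*log(2*pi) - log(det(R))
  const double logDetR = -n * std::log(sigma);  // R = I / sigma
  const double viaR = 0.5 * n * std::log(2 * M_PI) - logDetR;

  // Directly: -log(1 / sqrt((2*pi*sigma^2)^n)) = 0.5 * n * log(2*pi*sigma^2)
  const double direct = 0.5 * n * std::log(2 * M_PI * sigma * sigma);

  assert(std::abs(viaR - direct) < 1e-12);
  return 0;
}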
@@ -133,10 +133,14 @@ namespace gtsam {
     /// @{
 
     /**
-     * normalization constant = 1.0 / sqrt((2*pi)^n*det(Sigma))
-     * log = - 0.5 * n*log(2*pi) - 0.5 * log det(Sigma)
+     * @brief Return the negative log of the normalization constant.
+     *
+     * normalization constant k = 1.0 / sqrt((2*pi)^n*det(Sigma))
+     * -log(k) = 0.5 * n*log(2*pi) + 0.5 * log det(Sigma)
+     *
+     * @return double
      */
-    double logNormalizationConstant() const override;
+    double negLogConstant() const override;
 
     /**
      * Calculate log-probability log(evaluate(x)) for given values `x`:
@@ -255,18 +255,17 @@ double Gaussian::logDeterminant() const {
 }
 
 /* *******************************************************************************/
-double Gaussian::logNormalizationConstant() const {
+double Gaussian::negLogConstant() const {
   // log(det(Sigma)) = -2.0 * logDetR
-  // which gives log = -0.5*n*log(2*pi) - 0.5*(-2.0 * logDetR())
-  //                  = -0.5*n*log(2*pi) + (0.5*2.0 * logDetR())
-  //                  = -0.5*n*log(2*pi) + logDetR()
+  // which gives neg-log = 0.5*n*log(2*pi) + 0.5*(-2.0 * logDetR())
+  //                     = 0.5*n*log(2*pi) - (0.5*2.0 * logDetR())
+  //                     = 0.5*n*log(2*pi) - logDetR()
   size_t n = dim();
   constexpr double log2pi = 1.8378770664093454835606594728112;
-  // Get 1/log(\sqrt(|2pi Sigma|)) = -0.5*log(|2pi Sigma|)
-  return -0.5 * n * log2pi + logDetR();
+  // Get -log(1/\sqrt(|2pi Sigma|)) = 0.5*log(|2pi Sigma|)
+  return 0.5 * n * log2pi - logDetR();
 }
-
 
 /* ************************************************************************* */
 // Diagonal
 /* ************************************************************************* */
@@ -272,14 +272,12 @@ namespace gtsam {
     double logDeterminant() const;
 
     /**
-     * @brief Method to compute the normalization constant
-     * for a Gaussian noise model k = \sqrt(1/|2πΣ|).
-     * We compute this in the log-space for numerical accuracy,
-     * thus returning log(k).
+     * @brief Compute the negative log of the normalization constant
+     * for a Gaussian noise model k = 1/\sqrt(|2πΣ|).
      *
      * @return double
      */
-    double logNormalizationConstant() const;
+    double negLogConstant() const;
 
    private:
 #ifdef GTSAM_ENABLE_BOOST_SERIALIZATION
@@ -548,7 +548,7 @@ virtual class GaussianConditional : gtsam::JacobianFactor {
   bool equals(const gtsam::GaussianConditional& cg, double tol) const;
 
   // Standard Interface
-  double logNormalizationConstant() const;
+  double negLogConstant() const;
   double logProbability(const gtsam::VectorValues& x) const;
   double evaluate(const gtsam::VectorValues& x) const;
   double error(const gtsam::VectorValues& x) const;
@@ -76,12 +76,11 @@ TEST(GaussianBayesNet, Evaluate1) {
   // the normalization constant 1.0/sqrt((2*pi*Sigma).det()).
   // The covariance matrix inv(Sigma) = R'*R, so the determinant is
   const double constant = sqrt((invSigma / (2 * M_PI)).determinant());
-  EXPECT_DOUBLES_EQUAL(log(constant),
-                       smallBayesNet.at(0)->logNormalizationConstant() +
-                           smallBayesNet.at(1)->logNormalizationConstant(),
-                       1e-9);
-  EXPECT_DOUBLES_EQUAL(log(constant), smallBayesNet.logNormalizationConstant(),
-                       1e-9);
+  EXPECT_DOUBLES_EQUAL(-log(constant),
+                       smallBayesNet.at(0)->negLogConstant() +
+                           smallBayesNet.at(1)->negLogConstant(),
+                       1e-9);
+  EXPECT_DOUBLES_EQUAL(-log(constant), smallBayesNet.negLogConstant(), 1e-9);
   const double actual = smallBayesNet.evaluate(mean);
   EXPECT_DOUBLES_EQUAL(constant, actual, 1e-9);
 }
@@ -486,16 +486,17 @@ TEST(GaussianConditional, Error) {
 
 /* ************************************************************************* */
 // Similar test for multivariate gaussian but with sigma 2.0
-TEST(GaussianConditional, LogNormalizationConstant) {
+TEST(GaussianConditional, NegLogConstant) {
   double sigma = 2.0;
   auto conditional = GaussianConditional::FromMeanAndStddev(X(0), Vector3::Zero(), sigma);
   VectorValues x;
   x.insert(X(0), Vector3::Zero());
   Matrix3 Sigma = I_3x3 * sigma * sigma;
-  double expectedLogNormalizingConstant = log(1 / sqrt((2 * M_PI * Sigma).determinant()));
+  double expectedNegLogConstant =
+      -log(1 / sqrt((2 * M_PI * Sigma).determinant()));
 
-  EXPECT_DOUBLES_EQUAL(expectedLogNormalizingConstant,
-                       conditional.logNormalizationConstant(), 1e-9);
+  EXPECT_DOUBLES_EQUAL(expectedNegLogConstant, conditional.negLogConstant(),
+                       1e-9);
 }
 
 /* ************************************************************************* */
@@ -55,7 +55,7 @@ TEST(GaussianDensity, FromMeanAndStddev) {
   double expected1 = 0.5 * e.dot(e);
   EXPECT_DOUBLES_EQUAL(expected1, density.error(values), 1e-9);
 
-  double expected2 = density.logNormalizationConstant()- 0.5 * e.dot(e);
+  double expected2 = -(density.negLogConstant() + 0.5 * e.dot(e));
   EXPECT_DOUBLES_EQUAL(expected2, density.logProbability(values), 1e-9);
 }
 
@@ -807,50 +807,50 @@ TEST(NoiseModel, NonDiagonalGaussian)
   }
 }
 
-TEST(NoiseModel, LogNormalizationConstant1D) {
+TEST(NoiseModel, NegLogNormalizationConstant1D) {
   // Very simple 1D noise model, which we can compute by hand.
   double sigma = 0.1;
-  // For expected values, we compute 1/log(sqrt(|2πΣ|)) by hand.
-  // = -0.5*(log(2π) + log(Σ)) (since it is 1D)
-  double expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
+  // For expected values, we compute -log(1/sqrt(|2πΣ|)) by hand.
+  // = 0.5*(log(2π) + log(Σ)) (since it is 1D)
+  double expected_value = 0.5 * log(2 * M_PI * sigma * sigma);
 
   // Gaussian
   {
     Matrix11 R;
     R << 1 / sigma;
     auto noise_model = Gaussian::SqrtInformation(R);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Diagonal
   {
     auto noise_model = Diagonal::Sigmas(Vector1(sigma));
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Isotropic
   {
     auto noise_model = Isotropic::Sigma(1, sigma);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Unit
   {
     auto noise_model = Unit::Create(1);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     double sigma = 1.0;
-    expected_value = -0.5 * log(2 * M_PI * sigma * sigma);
+    expected_value = 0.5 * log(2 * M_PI * sigma * sigma);
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
 }
 
-TEST(NoiseModel, LogNormalizationConstant3D) {
+TEST(NoiseModel, NegLogNormalizationConstant3D) {
   // Simple 3D noise model, which we can compute by hand.
   double sigma = 0.1;
   size_t n = 3;
-  // We compute the expected values just like in the LogNormalizationConstant1D
+  // We compute the expected values just like in the NegLogNormalizationConstant1D
   // test, but we multiply by 3 due to the determinant.
-  double expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
+  double expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma);
 
   // Gaussian
   {
@@ -859,27 +859,27 @@ TEST(NoiseModel, LogNormalizationConstant3D) {
         0, 1 / sigma, 4,  //
         0, 0, 1 / sigma;
     auto noise_model = Gaussian::SqrtInformation(R);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Diagonal
   {
     auto noise_model = Diagonal::Sigmas(Vector3(sigma, sigma, sigma));
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Isotropic
   {
     auto noise_model = Isotropic::Sigma(n, sigma);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
   // Unit
   {
     auto noise_model = Unit::Create(3);
-    double actual_value = noise_model->logNormalizationConstant();
+    double actual_value = noise_model->negLogConstant();
     double sigma = 1.0;
-    expected_value = -0.5 * n * log(2 * M_PI * sigma * sigma);
+    expected_value = 0.5 * n * log(2 * M_PI * sigma * sigma);
     EXPECT_DOUBLES_EQUAL(expected_value, actual_value, 1e-9);
   }
 }
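One detail these tests make visible: despite the name, negLogConstant need not be positive. For σ = 0.1 the 1-D value is 0.5·log(2π·0.01) ≈ -1.384, because the density peak k = 1/√(2πΣ) ≈ 3.99 exceeds 1; the value is negative exactly when σ < 1/√(2π) ≈ 0.4. A short check in plain C++:

#include <cmath>
#include <cstdio>
#include <initializer_list>

int main() {
  for (double sigma : {0.1, 0.4, 1.0}) {
    // -log k = 0.5 * log(2*pi*sigma^2); negative when sigma < 1/sqrt(2*pi)
    std::printf("sigma=%.1f  negLogConstant=%+.4f\n", sigma,
                0.5 * std::log(2 * M_PI * sigma * sigma));
  }
  return 0;
}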
@@ -90,8 +90,7 @@ class TestHybridBayesNet(GtsamTestCase):
         self.assertTrue(probability >= 0.0)
         logProb = conditional.logProbability(values)
         self.assertAlmostEqual(probability, np.exp(logProb))
-        expected = conditional.logNormalizationConstant() - \
-            conditional.error(values)
+        expected = -(conditional.negLogConstant() + conditional.error(values))
         self.assertAlmostEqual(logProb, expected)
 
 
@@ -14,15 +14,16 @@ from __future__ import print_function
 
 import unittest
 
-import gtsam
-from gtsam import (
-    DoglegOptimizer, DoglegParams, DummyPreconditionerParameters, GaussNewtonOptimizer,
-    GaussNewtonParams, GncLMParams, GncLossType, GncLMOptimizer, LevenbergMarquardtOptimizer,
-    LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2,
-    PriorFactorPoint2, Values
-)
 from gtsam.utils.test_case import GtsamTestCase
 
+import gtsam
+from gtsam import (DoglegOptimizer, DoglegParams,
+                   DummyPreconditionerParameters, GaussNewtonOptimizer,
+                   GaussNewtonParams, GncLMOptimizer, GncLMParams, GncLossType,
+                   LevenbergMarquardtOptimizer, LevenbergMarquardtParams,
+                   NonlinearFactorGraph, Ordering, PCGSolverParameters, Point2,
+                   PriorFactorPoint2, Values)
+
 KEY1 = 1
 KEY2 = 2
 
@@ -136,7 +137,7 @@ class TestScenario(GtsamTestCase):
         # Test optimizer params
         optimizer = GncLMOptimizer(self.fg, self.initial_values, params)
         for ict_factor in (0.9, 1.1):
-            new_ict = ict_factor * optimizer.getInlierCostThresholds()
+            new_ict = ict_factor * optimizer.getInlierCostThresholds().item()
             optimizer.setInlierCostThresholds(new_ict)
             self.assertAlmostEqual(optimizer.getInlierCostThresholds(), new_ict)
         for w_factor in (0.8, 0.9):